tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
- ol_flags |= PKT_TX_VLAN_PKT;
+ ol_flags |= PKT_TX_VLAN;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
- ol_flags |= PKT_TX_QINQ_PKT;
+ ol_flags |= PKT_TX_QINQ;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
txp = &ports[fs->tx_port];
tx_offloads = txp->dev_conf.txmode.offloads;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
- ol_flags = PKT_TX_VLAN_PKT;
+ ol_flags = PKT_TX_VLAN;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
- ol_flags |= PKT_TX_QINQ_PKT;
+ ol_flags |= PKT_TX_QINQ;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
for (i = 0; i < nb_rx; i++) {
vlan_tci = txp->tx_vlan_id;
vlan_tci_outer = txp->tx_vlan_id_outer;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
- ol_flags = PKT_TX_VLAN_PKT;
+ ol_flags = PKT_TX_VLAN;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
- ol_flags |= PKT_TX_QINQ_PKT;
+ ol_flags |= PKT_TX_QINQ;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
}
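These three test-pmd hunks follow one pattern: the port's Tx offload configuration is folded once into a per-burst ol_flags word, which each mbuf then carries along with the configured tag values. A minimal sketch of the consuming side, with pkt, vlan_tci, and vlan_tci_outer standing in for the surrounding test-pmd state:

	/* Sketch (assumed names): apply the precomputed flags to one mbuf. */
	if (ol_flags & PKT_TX_VLAN)
		pkt->vlan_tci = vlan_tci;		/* inner tag, inserted by HW */
	if (ol_flags & PKT_TX_QINQ)
		pkt->vlan_tci_outer = vlan_tci_outer;	/* outer tag for QinQ */
	pkt->ol_flags |= ol_flags;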
/* insert vlan info if necessary */
- if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ if (mbuf->ol_flags & PKT_TX_VLAN) {
if (rte_vlan_insert(&mbuf)) {
rte_pktmbuf_free(mbuf);
continue;
first_buf->nb_segs = count;
first_buf->pkt_len = total_length;
- if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ if (mbuf->ol_flags & PKT_TX_VLAN) {
first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
first_buf->vlan_tci = mbuf->vlan_tci;
}
pkt_buf->nb_segs = 1;
pkt_buf->next = NULL;
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & PKT_TX_VLAN) {
pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
pkt_buf->vlan_tci = m->vlan_tci;
}
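The af_packet and AVP hunks above take the software path: when the device cannot insert the tag itself, the flag is honoured with rte_vlan_insert(), which builds the 802.1Q header from mbuf->vlan_tci. A hedged sketch of that fallback, assuming a per-packet loop:

	/* rte_vlan_insert() takes the mbuf by address because it may
	 * reallocate the head segment; drop the packet if insertion fails. */
	if (mbuf->ol_flags & PKT_TX_VLAN) {
		if (rte_vlan_insert(&mbuf) != 0) {
			rte_pktmbuf_free(mbuf);
			continue;
		}
	}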
AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
rte_wmb();
- if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ if (mbuf->ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
/* Mark it as a CONTEXT descriptor */
AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
CTXT, 1);
tx_start_bd->nbd = rte_cpu_to_le_16(2);
- if (m0->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m0->ol_flags & PKT_TX_VLAN) {
tx_start_bd->vlan_or_ethertype =
rte_cpu_to_le_16(m0->vlan_tci);
tx_start_bd->bd_flags.as_bitfield |=
{
if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
- PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_VLAN | PKT_TX_OUTER_IP_CKSUM |
PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
PKT_TX_TUNNEL_GENEVE | PKT_TX_IEEE1588_TMST |
- PKT_TX_QINQ_PKT) ||
+ PKT_TX_QINQ) ||
(BNXT_TRUFLOW_EN(txq->bp) &&
(txq->bp->tx_cfa_action || txq->vfr_tx_cfa_action)))
return true;
vlan_tag_flags = 0;
/* HW can accelerate only the outer VLAN in QinQ mode */
- if (tx_pkt->ol_flags & PKT_TX_QINQ_PKT) {
+ if (tx_pkt->ol_flags & PKT_TX_QINQ) {
vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
tx_pkt->vlan_tci_outer;
outer_tpid_bd = txq->bp->outer_tpid_bd &
BNXT_OUTER_TPID_BD_MASK;
vlan_tag_flags |= outer_tpid_bd;
- } else if (tx_pkt->ol_flags & PKT_TX_VLAN_PKT) {
+ } else if (tx_pkt->ol_flags & PKT_TX_VLAN) {
/* shurd: Should this mask at
* TX_BD_LONG_CFA_META_VLAN_VID_MASK?
*/
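As the driver comment notes, bnxt hardware accelerates only the outer tag in QinQ mode, carrying it in the CFA metadata of the long Tx BD. An illustrative transmit-side setup, with outer_tci as a stand-in value:

	/* Sketch: request outer-tag insertion; the inner 802.1Q header must
	 * already be present in the frame data on this device. */
	m->ol_flags |= PKT_TX_QINQ;
	m->vlan_tci_outer = outer_tci;	/* consumed via TX_BD_LONG_CFA_META_* */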
cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
}
- if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ if (mbuf->ol_flags & PKT_TX_VLAN) {
txq->stats.vlan_ins++;
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
}
txq->stats.tx_cso += m->tso_segsz;
}
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & PKT_TX_VLAN) {
txq->stats.vlan_ins++;
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci);
}
(*bufs)->nb_segs == 1 &&
rte_mbuf_refcnt_read((*bufs)) == 1)) {
if (unlikely(((*bufs)->ol_flags
- & PKT_TX_VLAN_PKT) ||
+ & PKT_TX_VLAN) ||
(eth_data->dev_conf.txmode.offloads
& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
ret = rte_vlan_insert(bufs);
if (ret)
	goto send_n_return;
}
- if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
+ if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN) ||
(eth_data->dev_conf.txmode.offloads
& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
int ret = rte_vlan_insert(bufs);
(*bufs)->nb_segs == 1 &&
rte_mbuf_refcnt_read((*bufs)) == 1)) {
if (unlikely((*bufs)->ol_flags
- & PKT_TX_VLAN_PKT)) {
+ & PKT_TX_VLAN)) {
ret = rte_vlan_insert(bufs);
if (ret)
goto send_n_return;
PKT_TX_IPV4 | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
- PKT_TX_VLAN_PKT)
+ PKT_TX_VLAN)
#define E1000_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK)
popts_spec = 0;
/* Set VLAN Tag offload fields. */
- if (ol_flags & PKT_TX_VLAN_PKT) {
+ if (ol_flags & PKT_TX_VLAN) {
cmd_type_len |= E1000_TXD_CMD_VLE;
popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT;
}
PKT_TX_OUTER_IPV4 | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
- PKT_TX_VLAN_PKT | \
+ PKT_TX_VLAN | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG | \
/* Specify which HW CTX to upload. */
mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
- if (ol_flags & PKT_TX_VLAN_PKT)
+ if (ol_flags & PKT_TX_VLAN)
tx_offload_mask.data |= TX_VLAN_CMP_MASK;
/* check if TCP segmentation is required for this packet */
uint32_t cmdtype;
static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
- cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
+ cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN) != 0];
cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
return cmdtype;
}
#endif
#define FM10K_TX_OFFLOAD_MASK ( \
- PKT_TX_VLAN_PKT | \
+ PKT_TX_VLAN | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
PKT_TX_IP_CKSUM | \
q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM;
/* set vlan if requested */
- if (mb->ol_flags & PKT_TX_VLAN_PKT)
+ if (mb->ol_flags & PKT_TX_VLAN)
q->hw_ring[q->next_free].vlan = mb->vlan_tci;
else
q->hw_ring[q->next_free].vlan = 0;
task->pkt_info2 = 0;
/* Base VLAN */
- if (unlikely(ol_flags & PKT_TX_VLAN_PKT)) {
+ if (unlikely(ol_flags & PKT_TX_VLAN)) {
vlan_tag = mbuf->vlan_tci;
hinic_set_vlan_tx_offload(task, queue_info, vlan_tag,
vlan_tag >> VLAN_PRIO_SHIFT);
* To avoid the VLAN in the Tx descriptor being overwritten by the PVID,
* the VLAN tag should be placed close to the IP header when PVID is
* enabled.
*/
- if (!txq->pvid_sw_shift_en && ol_flags & (PKT_TX_VLAN_PKT |
- PKT_TX_QINQ_PKT)) {
+ if (!txq->pvid_sw_shift_en && ol_flags & (PKT_TX_VLAN |
+ PKT_TX_QINQ)) {
desc->tx.ol_type_vlan_len_msec |=
rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
- if (ol_flags & PKT_TX_QINQ_PKT)
+ if (ol_flags & PKT_TX_QINQ)
desc->tx.outer_vlan_tag =
rte_cpu_to_le_16(rxm->vlan_tci_outer);
else
rte_cpu_to_le_16(rxm->vlan_tci);
}
- if (ol_flags & PKT_TX_QINQ_PKT ||
- ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_sw_shift_en)) {
+ if (ol_flags & PKT_TX_QINQ ||
+ ((ol_flags & PKT_TX_VLAN) && txq->pvid_sw_shift_en)) {
desc->tx.type_cs_vlan_tso_len |=
rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
* implementation function named hns3_prep_pkts to inform users that
* these packets will be discarded.
*/
- if (m->ol_flags & PKT_TX_QINQ_PKT)
+ if (m->ol_flags & PKT_TX_QINQ)
return -EINVAL;
eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
- if (m->ol_flags & PKT_TX_VLAN_PKT)
+ if (m->ol_flags & PKT_TX_VLAN)
return -EINVAL;
/* Ensure the incoming packet is not a QinQ packet */
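Both hns3 checks run in the Tx prepare stage, so an unsupportable QinQ request, or a VLAN request on an already-tagged frame, is rejected before any descriptor is written. The calling pattern an application would use, sketched with a hypothetical handle_bad_pkt() helper:

	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
	if (nb_prep < nb_pkts)
		handle_bad_pkt(pkts[nb_prep], rte_errno);	/* rte_errno explains why */
	uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);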
PKT_TX_L4_MASK | \
PKT_TX_OUTER_IP_CKSUM | \
PKT_TX_TCP_SEG | \
- PKT_TX_QINQ_PKT | \
- PKT_TX_VLAN_PKT | \
+ PKT_TX_QINQ | \
+ PKT_TX_VLAN | \
PKT_TX_TUNNEL_MASK | \
I40E_TX_IEEE1588_TMST)
{
static uint64_t mask = PKT_TX_OUTER_IP_CKSUM |
PKT_TX_TCP_SEG |
- PKT_TX_QINQ_PKT |
+ PKT_TX_QINQ |
PKT_TX_TUNNEL_MASK;
#ifdef RTE_LIBRTE_IEEE1588
}
/* Descriptor-based VLAN insertion */
- if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
td_tag = tx_pkt->vlan_tci;
}
ctx_txd->tunneling_params =
rte_cpu_to_le_32(cd_tunneling_params);
- if (ol_flags & PKT_TX_QINQ_PKT) {
+ if (ol_flags & PKT_TX_QINQ) {
cd_l2tag2 = tx_pkt->vlan_tci_outer;
cd_type_cmd_tso_mss |=
((uint64_t)I40E_TX_CTX_DESC_IL2TAG2 <<
{
if (flags & PKT_TX_TCP_SEG)
return 1;
- if (flags & PKT_TX_VLAN_PKT &&
+ if (flags & PKT_TX_VLAN &&
vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
return 1;
return 0;
}
/* Descriptor-based VLAN insertion */
- if (ol_flags & PKT_TX_VLAN_PKT &&
+ if (ol_flags & PKT_TX_VLAN &&
txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
td_tag = tx_pkt->vlan_tci;
cd_type_cmd_tso_mss |=
iavf_set_tso_ctx(tx_pkt, tx_offload);
- if (ol_flags & PKT_TX_VLAN_PKT &&
+ if (ol_flags & PKT_TX_VLAN &&
txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
PKT_TX_OUTER_IPV4 | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
- PKT_TX_VLAN_PKT | \
+ PKT_TX_VLAN | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG)
PKT_TX_OUTER_IPV4 | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
- PKT_TX_VLAN_PKT | \
+ PKT_TX_VLAN | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG | \
/* Specify which HW CTX to upload. */
mss_l4len_idx = (ctx_curr << IGC_ADVTXD_IDX_SHIFT);
- if (ol_flags & PKT_TX_VLAN_PKT)
+ if (ol_flags & PKT_TX_VLAN)
tx_offload_mask.vlan_tci = 0xffff;
/* check if TCP segmentation is required for this packet */
uint32_t cmdtype;
static uint32_t vlan_cmd[2] = {0, IGC_ADVTXD_DCMD_VLE};
static uint32_t tso_cmd[2] = {0, IGC_ADVTXD_DCMD_TSE};
- cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
+ cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN) != 0];
cmdtype |= tso_cmd[(ol_flags & IGC_TX_OFFLOAD_SEG) != 0];
return cmdtype;
}
uint32_t offset = 0;
bool start, done;
bool encap;
- bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT);
+ bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN);
uint16_t vlan_tci = txm->vlan_tci;
uint64_t ol_flags = txm->ol_flags;
if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
stats->no_csum++;
- has_vlan = (ol_flags & PKT_TX_VLAN_PKT);
+ has_vlan = (ol_flags & PKT_TX_VLAN);
encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
((ol_flags & PKT_TX_OUTER_IPV4) ||
PKT_TX_OUTER_IPV4 | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
- PKT_TX_VLAN_PKT | \
+ PKT_TX_VLAN | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG | \
/* Specify which HW CTX to upload. */
mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
- if (ol_flags & PKT_TX_VLAN_PKT) {
+ if (ol_flags & PKT_TX_VLAN)
tx_offload_mask.vlan_tci |= ~0;
- }
/* check if TCP segmentation is required for this packet */
if (ol_flags & PKT_TX_TCP_SEG) {
{
uint32_t cmdtype = 0;
- if (ol_flags & PKT_TX_VLAN_PKT)
+ if (ol_flags & PKT_TX_VLAN)
cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
if (ol_flags & PKT_TX_TCP_SEG)
cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
* should be set regardless of HW offload.
*/
off = loc->mbuf->outer_l2_len;
- if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
+ if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN)
off += sizeof(struct rte_vlan_hdr);
set = (off >> 1) << 8; /* Outer L3 offset. */
off += loc->mbuf->outer_l3_len;
0 : 0;
/* Engage VLAN tag insertion feature if requested. */
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ loc->mbuf->ol_flags & PKT_TX_VLAN) {
/*
* We should get here only if the device
* supports this feature correctly.
* the required space in WQE ring buffer.
*/
dlen = rte_pktmbuf_pkt_len(loc->mbuf);
- if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
+ if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN)
vlan = sizeof(struct rte_vlan_hdr);
inlen = loc->mbuf->l2_len + vlan +
loc->mbuf->l3_len + loc->mbuf->l4_len;
/* Update sent data bytes counter. */
txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
+ loc->mbuf->ol_flags & PKT_TX_VLAN)
txq->stats.obytes += sizeof(struct rte_vlan_hdr);
#endif
/*
* to estimate the required space for WQE.
*/
dlen = rte_pktmbuf_pkt_len(loc->mbuf);
- if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
+ if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN)
vlan = sizeof(struct rte_vlan_hdr);
inlen = dlen + vlan;
/* Check against minimal length. */
}
dlen = rte_pktmbuf_data_len(loc->mbuf);
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ loc->mbuf->ol_flags & PKT_TX_VLAN) {
vlan = sizeof(struct rte_vlan_hdr);
}
/*
return MLX5_TXCMP_CODE_SINGLE;
/* Check if eMPW can be engaged. */
if (MLX5_TXOFF_CONFIG(VLAN) &&
- unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
+ unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN) &&
(!MLX5_TXOFF_CONFIG(INLINE) ||
unlikely((rte_pktmbuf_data_len(loc->mbuf) +
sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
return false;
/* There must be no VLAN packets in the eMPW loop. */
if (MLX5_TXOFF_CONFIG(VLAN))
- MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
+ MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN));
/* Check if the scheduling is requested. */
if (MLX5_TXOFF_CONFIG(TXPP) &&
loc->mbuf->ol_flags & txq->ts_mask)
}
/* Inline entire packet, optional VLAN insertion. */
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ loc->mbuf->ol_flags & PKT_TX_VLAN) {
/*
* The packet length must be checked in
* mlx5_tx_able_to_empw() and packet
MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
if (MLX5_TXOFF_CONFIG(VLAN))
MLX5_ASSERT(!(loc->mbuf->ol_flags &
- PKT_TX_VLAN_PKT));
+ PKT_TX_VLAN));
mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
/* We have to store mbuf in elts.*/
txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
inlen = rte_pktmbuf_data_len(loc->mbuf);
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ loc->mbuf->ol_flags & PKT_TX_VLAN) {
vlan = sizeof(struct rte_vlan_hdr);
inlen += vlan;
}
/* Update sent data bytes counter. */
txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
+ loc->mbuf->ol_flags & PKT_TX_VLAN)
txq->stats.obytes +=
sizeof(struct rte_vlan_hdr);
#endif
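All of the mlx5 hunks deal with the same subtlety: the VLAN header is synthesized by the device or inlined into the WQE rather than stored in the mbuf, so every space estimate and byte counter must account for it explicitly. The recurring idiom, reduced to a sketch:

	/* The tag is not part of the mbuf data, so the wire length adds it. */
	uint32_t wire_len = rte_pktmbuf_pkt_len(mbuf);
	if (mbuf->ol_flags & PKT_TX_VLAN)
		wire_len += sizeof(struct rte_vlan_hdr);	/* 4 bytes */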
NDIS_PKTINFO_TYPE_HASHVAL);
*pi_data = queue_id;
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & PKT_TX_VLAN) {
pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE,
NDIS_PKTINFO_TYPE_VLAN);
*pi_data = m->vlan_tci;
nfp_net_tx_tso(txq, &txd, pkt);
nfp_net_tx_cksum(txq, &txd, pkt);
- if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
+ if ((pkt->ol_flags & PKT_TX_VLAN) &&
(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
txd.flags |= PCIE_DESC_TX_VLAN;
txd.vlan = pkt->vlan_tci;
}
/* Descriptor-based VLAN insertion */
- if (tx_ol_flags & PKT_TX_VLAN_PKT) {
+ if (tx_ol_flags & PKT_TX_VLAN) {
vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
PKT_TX_IPV6)
#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
- PKT_TX_VLAN_PKT | \
+ PKT_TX_VLAN | \
PKT_TX_TUNNEL_MASK)
#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
ESF_GZ_TX_SEND_CSO_OUTER_L4, outer_l4,
ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEND);
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & PKT_TX_VLAN) {
efx_oword_t tx_desc_extra_fields;
EFX_POPULATE_OWORD_2(tx_desc_extra_fields,
EFX_OR_OWORD(*tx_desc, tx_desc_extra_fields);
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & PKT_TX_VLAN) {
EFX_POPULATE_OWORD_2(tx_desc_extra_fields,
ESF_GZ_TX_TSO_VLAN_INSERT_EN, 1,
ESF_GZ_TX_TSO_VLAN_INSERT_TCI, m->vlan_tci);
/* ef10_simple does not support TSO or VLAN insertion */
if (unlikely(m->ol_flags &
- (PKT_TX_TCP_SEG | PKT_TX_VLAN_PKT))) {
+ (PKT_TX_TCP_SEG | PKT_TX_VLAN))) {
rte_errno = ENOTSUP;
break;
}
sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m,
efx_desc_t **pend)
{
- uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN_PKT) ?
+ uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN) ?
m->vlan_tci : 0);
if (this_tag == txq->hw_vlan_tci)
PKT_TX_OUTER_IPV4 |
PKT_TX_IPV6 |
PKT_TX_IPV4 |
- PKT_TX_VLAN_PKT |
+ PKT_TX_VLAN |
PKT_TX_L4_MASK |
PKT_TX_TCP_SEG |
PKT_TX_TUNNEL_MASK |
vlan_macip_lens |= TXGBE_TXD_MACLEN(tx_offload.l2_len);
}
- if (ol_flags & PKT_TX_VLAN_PKT) {
+ if (ol_flags & PKT_TX_VLAN) {
tx_offload_mask.vlan_tci |= ~0;
vlan_macip_lens |= TXGBE_TXD_VLAN(tx_offload.vlan_tci);
}
tmp |= TXGBE_TXD_IPCS;
tmp |= TXGBE_TXD_L4CS;
}
- if (ol_flags & PKT_TX_VLAN_PKT)
+ if (ol_flags & PKT_TX_VLAN)
tmp |= TXGBE_TXD_CC;
return tmp;
{
uint32_t cmdtype = 0;
- if (ol_flags & PKT_TX_VLAN_PKT)
+ if (ol_flags & PKT_TX_VLAN)
cmdtype |= TXGBE_TXD_VLE;
if (ol_flags & PKT_TX_TCP_SEG)
cmdtype |= TXGBE_TXD_TSE;
struct rte_mbuf *m = bufs[i];
/* Do VLAN tag insertion */
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & PKT_TX_VLAN) {
int error = rte_vlan_insert(&m);
if (unlikely(error)) {
rte_pktmbuf_free(m);
#endif
/* Do VLAN tag insertion */
- if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
+ if (unlikely(m->ol_flags & PKT_TX_VLAN)) {
error = rte_vlan_insert(&m);
/* rte_vlan_insert() may change pointer
* even in the case of failure
#include "vmxnet3_ethdev.h"
#define VMXNET3_TX_OFFLOAD_MASK ( \
- PKT_TX_VLAN_PKT | \
+ PKT_TX_VLAN | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
PKT_TX_L4_MASK | \
/* Add VLAN tag if present */
gdesc = txq->cmd_ring.base + first2fill;
- if (txm->ol_flags & PKT_TX_VLAN_PKT) {
+ if (txm->ol_flags & PKT_TX_VLAN) {
gdesc->txd.ti = 1;
gdesc->txd.tci = txm->vlan_tci;
}
(vh->vlan_tci != vlan_tag_be))
vh->vlan_tci = vlan_tag_be;
} else {
- m->ol_flags |= PKT_TX_VLAN_PKT;
+ m->ol_flags |= PKT_TX_VLAN;
/*
* Find the right seg to adjust the data len when offset is
* bigger than tail room size.
*/
#define PKT_TX_QINQ (1ULL << 49)
/** This old name is deprecated. */
-#define PKT_TX_QINQ_PKT PKT_TX_QINQ
+#define PKT_TX_QINQ_PKT RTE_DEPRECATED(PKT_TX_QINQ_PKT) PKT_TX_QINQ
/**
* TCP segmentation offload. To enable this offload feature for a
*/
#define PKT_TX_VLAN (1ULL << 57)
/** This old name is deprecated. */
-#define PKT_TX_VLAN_PKT PKT_TX_VLAN
+#define PKT_TX_VLAN_PKT RTE_DEPRECATED(PKT_TX_VLAN_PKT) PKT_TX_VLAN
/**
* Offload the IP checksum of an external header in the hardware. The
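With RTE_DEPRECATED spliced into the aliases, each use of an old name still expands to the new flag, so existing code keeps building, but the compiler now emits a deprecation warning naming the offending symbol. Before/after, with m and tci as stand-ins:

	m->ol_flags |= PKT_TX_VLAN_PKT;	/* still compiles, now warns */
	m->ol_flags |= PKT_TX_VLAN;	/* preferred spelling after this patch */
	m->vlan_tci = tci;		/* tag value consumed with the flag */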
PKT_TX_OUTER_IPV6 | \
PKT_TX_OUTER_IPV4 | \
PKT_TX_OUTER_IP_CKSUM | \
- PKT_TX_VLAN_PKT | \
+ PKT_TX_VLAN | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_IEEE1588_TMST | \
PKT_TX_TCP_SEG | \
- PKT_TX_QINQ_PKT | \
+ PKT_TX_QINQ | \
PKT_TX_TUNNEL_MASK | \
PKT_TX_MACSEC | \
PKT_TX_SEC_OFFLOAD | \
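PKT_TX_OFFLOAD_MASK is the universe of valid Tx flags; each driver XORs its supported subset against it (see E1000_TX_OFFLOAD_NOTSUP_MASK earlier in this patch) and rejects stray bits in its prepare callback. The generic shape of that check, sketched:

	/* Sketch of a tx_prepare-style validation for packet i. */
	if (m->ol_flags & E1000_TX_OFFLOAD_NOTSUP_MASK) {
		rte_errno = ENOTSUP;
		return i;	/* stop at the first unsupported packet */
	}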