diff --git a/drivers/net/sfc/sfc_ef100_tx.c b/drivers/net/sfc/sfc_ef100_tx.c
index 290414de9d..c0944df166 100644
--- a/drivers/net/sfc/sfc_ef100_tx.c
+++ b/drivers/net/sfc/sfc_ef100_tx.c
@@ -99,7 +99,7 @@ static int
 sfc_ef100_tx_prepare_pkt_tso(struct sfc_ef100_txq * const txq,
 			     struct rte_mbuf *m)
 {
-	size_t header_len = ((m->ol_flags & PKT_TX_TUNNEL_MASK) ?
+	size_t header_len = ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
 			     m->outer_l2_len + m->outer_l3_len : 0) +
 			    m->l2_len + m->l3_len + m->l4_len;
 	size_t payload_len = m->pkt_len - header_len;
@@ -107,12 +107,12 @@ sfc_ef100_tx_prepare_pkt_tso(struct sfc_ef100_txq * const txq,
 	unsigned int nb_payload_descs;
 
 #ifdef RTE_LIBRTE_SFC_EFX_DEBUG
-	switch (m->ol_flags & PKT_TX_TUNNEL_MASK) {
+	switch (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
 	case 0:
 		/* FALLTHROUGH */
-	case PKT_TX_TUNNEL_VXLAN:
+	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
 		/* FALLTHROUGH */
-	case PKT_TX_TUNNEL_GENEVE:
+	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
 		break;
 	default:
 		return ENOTSUP;
@@ -165,11 +165,11 @@ sfc_ef100_tx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * pseudo-header checksum which is calculated below,
 		 * but requires contiguous packet headers.
 		 */
-		if ((m->ol_flags & PKT_TX_TUNNEL_MASK) &&
-		    (m->ol_flags & PKT_TX_L4_MASK)) {
+		if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
+		    (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)) {
 			calc_phdr_cksum = true;
 			max_nb_header_segs = 1;
-		} else if (m->ol_flags & PKT_TX_TCP_SEG) {
+		} else if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
 			max_nb_header_segs = txq->tso_max_nb_header_descs;
 		}
 
@@ -181,7 +181,7 @@ sfc_ef100_tx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			break;
 		}
 
-		if (m->ol_flags & PKT_TX_TCP_SEG) {
+		if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
 			ret = sfc_ef100_tx_prepare_pkt_tso(txq, m);
 			if (unlikely(ret != 0)) {
 				rte_errno = ret;
@@ -198,7 +198,7 @@ sfc_ef100_tx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			 * and does not require any assistance.
 			 */
 			ret = rte_net_intel_cksum_flags_prepare(m,
-					m->ol_flags & ~PKT_TX_IP_CKSUM);
+					m->ol_flags & ~RTE_MBUF_F_TX_IP_CKSUM);
 			if (unlikely(ret != 0)) {
 				rte_errno = -ret;
 				break;
@@ -329,10 +329,10 @@ sfc_ef100_tx_qdesc_cso_inner_l3(uint64_t tx_tunnel)
 	uint8_t inner_l3;
 
 	switch (tx_tunnel) {
-	case PKT_TX_TUNNEL_VXLAN:
+	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
 		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_VXLAN;
 		break;
-	case PKT_TX_TUNNEL_GENEVE:
+	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
 		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_GENEVE;
 		break;
 	default:
@@ -352,25 +352,25 @@ sfc_ef100_tx_qdesc_send_create(const struct rte_mbuf *m, efx_oword_t *tx_desc)
 	uint16_t part_cksum_w;
 	uint16_t l4_offset_w;
 
-	if ((m->ol_flags & PKT_TX_TUNNEL_MASK) == 0) {
-		outer_l3 = (m->ol_flags & PKT_TX_IP_CKSUM);
-		outer_l4 = (m->ol_flags & PKT_TX_L4_MASK);
+	if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) == 0) {
+		outer_l3 = (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
+		outer_l4 = (m->ol_flags & RTE_MBUF_F_TX_L4_MASK);
 		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_OFF;
 		partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_OFF;
 		part_cksum_w = 0;
 		l4_offset_w = 0;
 	} else {
-		outer_l3 = (m->ol_flags & PKT_TX_OUTER_IP_CKSUM);
-		outer_l4 = (m->ol_flags & PKT_TX_OUTER_UDP_CKSUM);
+		outer_l3 = (m->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM);
+		outer_l4 = (m->ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
 		inner_l3 = sfc_ef100_tx_qdesc_cso_inner_l3(m->ol_flags &
-							   PKT_TX_TUNNEL_MASK);
+							   RTE_MBUF_F_TX_TUNNEL_MASK);
 
-		switch (m->ol_flags & PKT_TX_L4_MASK) {
-		case PKT_TX_TCP_CKSUM:
+		switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+		case RTE_MBUF_F_TX_TCP_CKSUM:
 			partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_TCP;
 			part_cksum_w = offsetof(struct rte_tcp_hdr, cksum) >> 1;
 			break;
-		case PKT_TX_UDP_CKSUM:
+		case RTE_MBUF_F_TX_UDP_CKSUM:
 			partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_UDP;
 			part_cksum_w = offsetof(struct rte_udp_hdr,
 						dgram_cksum) >> 1;
@@ -396,7 +396,7 @@ sfc_ef100_tx_qdesc_send_create(const struct rte_mbuf *m, efx_oword_t *tx_desc)
 			ESF_GZ_TX_SEND_CSO_OUTER_L4, outer_l4,
 			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEND);
 
-	if (m->ol_flags & PKT_TX_VLAN) {
+	if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
 		efx_oword_t tx_desc_extra_fields;
 
 		EFX_POPULATE_OWORD_2(tx_desc_extra_fields,
@@ -437,7 +437,7 @@ sfc_ef100_tx_qdesc_tso_create(const struct rte_mbuf *m,
 	 */
 	int ed_inner_ip_id = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
 	uint8_t inner_l3 = sfc_ef100_tx_qdesc_cso_inner_l3(
-					m->ol_flags & PKT_TX_TUNNEL_MASK);
+					m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);
 
 	EFX_POPULATE_OWORD_10(*tx_desc,
 			ESF_GZ_TX_TSO_MSS, m->tso_segsz,
@@ -478,7 +478,7 @@ sfc_ef100_tx_qdesc_tso_create(const struct rte_mbuf *m,
 
 	EFX_OR_OWORD(*tx_desc, tx_desc_extra_fields);
 
-	if (m->ol_flags & PKT_TX_VLAN) {
+	if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
 		EFX_POPULATE_OWORD_2(tx_desc_extra_fields,
 				ESF_GZ_TX_TSO_VLAN_INSERT_EN, 1,
 				ESF_GZ_TX_TSO_VLAN_INSERT_TCI, m->vlan_tci);
@@ -519,7 +519,7 @@ sfc_ef100_tx_pkt_descs_max(const struct rte_mbuf *m)
 #define SFC_MBUF_SEG_LEN_MAX	UINT16_MAX
 	RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);
 
-	if (m->ol_flags & PKT_TX_TCP_SEG) {
+	if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
 		/* Tx TSO descriptor */
 		extra_descs++;
 		/*
@@ -571,7 +571,7 @@ sfc_ef100_xmit_tso_pkt(struct sfc_ef100_txq * const txq,
 	size_t header_len;
 	size_t remaining_hdr_len;
 
-	if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
+	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
 		outer_iph_off = m->outer_l2_len;
 		outer_udph_off = outer_iph_off + m->outer_l3_len;
 	} else {
@@ -696,7 +696,7 @@ sfc_ef100_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				&txq->txq_hw_ring[id]);
 		}
 
-		if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
+		if (m_seg->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
 			m_seg = sfc_ef100_xmit_tso_pkt(txq, m_seg, &added);
 		} else {
 			id = added++ & txq->ptr_mask;
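
For reference, below is a minimal sketch, not part of the patch, of how an application built against DPDK 21.11 or later would request the VXLAN-tunneled TSO path exercised by this driver using the renamed RTE_MBUF_F_TX_* flags. The function name request_vxlan_tso, the mbuf "m", the header layout (IPv4 outer and inner, TCP payload) and the MSS value are illustrative assumptions; only the flag and field names come from rte_mbuf.

/*
 * Minimal sketch (illustrative, not from the patch): request
 * VXLAN-tunneled TSO with the renamed mbuf offload flags.
 * Old names are shown in comments for comparison with the diff above.
 */
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_vxlan.h>
#include <rte_mbuf.h>

static void
request_vxlan_tso(struct rte_mbuf *m)
{
	m->ol_flags |= RTE_MBUF_F_TX_TUNNEL_VXLAN |   /* was PKT_TX_TUNNEL_VXLAN */
		       RTE_MBUF_F_TX_OUTER_IPV4 |
		       RTE_MBUF_F_TX_OUTER_IP_CKSUM | /* was PKT_TX_OUTER_IP_CKSUM */
		       RTE_MBUF_F_TX_IPV4 |
		       RTE_MBUF_F_TX_IP_CKSUM |       /* was PKT_TX_IP_CKSUM */
		       RTE_MBUF_F_TX_TCP_SEG;         /* was PKT_TX_TCP_SEG; implies
						       * the TCP checksum request */

	/*
	 * Header lengths consumed by sfc_ef100_tx_prepare_pkt_tso():
	 * per the rte_mbuf convention, for a tunneled packet l2_len
	 * spans outer L4 + tunnel header + inner L2.
	 */
	m->outer_l2_len = sizeof(struct rte_ether_hdr);
	m->outer_l3_len = sizeof(struct rte_ipv4_hdr);
	m->l2_len = sizeof(struct rte_udp_hdr) + sizeof(struct rte_vxlan_hdr) +
		    sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->l4_len = sizeof(struct rte_tcp_hdr);
	m->tso_segsz = 1448; /* assumed MSS */
}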