X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fsfc_ef100_tx.c;h=e166fda888b19d48e697479d6cc024ad0c1d1fca;hb=44db08d53be366d69bb7d16bffc3e55ba2d7398a;hp=e30d5369d7ea971d82aa396d49cd0e72b9c421ad;hpb=4f936666d7909286b0030b118c8d304e9195f7df;p=dpdk.git

diff --git a/drivers/net/sfc/sfc_ef100_tx.c b/drivers/net/sfc/sfc_ef100_tx.c
index e30d5369d7..e166fda888 100644
--- a/drivers/net/sfc/sfc_ef100_tx.c
+++ b/drivers/net/sfc/sfc_ef100_tx.c
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
- * Copyright(c) 2019-2020 Xilinx, Inc.
+ * Copyright(c) 2019-2021 Xilinx, Inc.
  * Copyright(c) 2018-2019 Solarflare Communications Inc.
  *
  * This software was jointly developed between OKTET Labs (under contract
@@ -10,6 +10,7 @@
 #include <stdbool.h>
 
 #include <rte_mbuf.h>
+#include <rte_mbuf_dyn.h>
 #include <rte_io.h>
 #include <rte_net.h>
 
@@ -98,11 +99,26 @@ static int
 sfc_ef100_tx_prepare_pkt_tso(struct sfc_ef100_txq * const txq,
 			     struct rte_mbuf *m)
 {
-	size_t header_len = m->l2_len + m->l3_len + m->l4_len;
+	size_t header_len = ((m->ol_flags & PKT_TX_TUNNEL_MASK) ?
+			     m->outer_l2_len + m->outer_l3_len : 0) +
+			    m->l2_len + m->l3_len + m->l4_len;
 	size_t payload_len = m->pkt_len - header_len;
 	unsigned long mss_conformant_max_payload_len;
 	unsigned int nb_payload_descs;
 
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+	switch (m->ol_flags & PKT_TX_TUNNEL_MASK) {
+	case 0:
+		/* FALLTHROUGH */
+	case PKT_TX_TUNNEL_VXLAN:
+		/* FALLTHROUGH */
+	case PKT_TX_TUNNEL_GENEVE:
+		break;
+	default:
+		return ENOTSUP;
+	}
+#endif
+
 	mss_conformant_max_payload_len =
 		m->tso_segsz * txq->tso_max_nb_outgoing_frames;
 
@@ -294,6 +310,19 @@ sfc_ef100_tx_reap(struct sfc_ef100_txq *txq)
 	sfc_ef100_tx_reap_num_descs(txq, sfc_ef100_tx_process_events(txq));
 }
 
+static void
+sfc_ef100_tx_qdesc_prefix_create(const struct rte_mbuf *m, efx_oword_t *tx_desc)
+{
+	efx_mport_id_t *mport_id =
+		RTE_MBUF_DYNFIELD(m, sfc_dp_mport_offset, efx_mport_id_t *);
+
+	EFX_POPULATE_OWORD_3(*tx_desc,
+			ESF_GZ_TX_PREFIX_EGRESS_MPORT,
+			mport_id->id,
+			ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN, 1,
+			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_PREFIX);
+}
+
 static uint8_t
 sfc_ef100_tx_qdesc_cso_inner_l3(uint64_t tx_tunnel)
 {
@@ -366,6 +395,16 @@ sfc_ef100_tx_qdesc_send_create(const struct rte_mbuf *m, efx_oword_t *tx_desc)
 		ESF_GZ_TX_SEND_CSO_OUTER_L3, outer_l3,
 		ESF_GZ_TX_SEND_CSO_OUTER_L4, outer_l4,
 		ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEND);
+
+	if (m->ol_flags & PKT_TX_VLAN_PKT) {
+		efx_oword_t tx_desc_extra_fields;
+
+		EFX_POPULATE_OWORD_2(tx_desc_extra_fields,
+				ESF_GZ_TX_SEND_VLAN_INSERT_EN, 1,
+				ESF_GZ_TX_SEND_VLAN_INSERT_TCI, m->vlan_tci);
+
+		EFX_OR_OWORD(*tx_desc, tx_desc_extra_fields);
+	}
 }
 
 static void
@@ -383,32 +422,52 @@ sfc_ef100_tx_qdesc_tso_create(const struct rte_mbuf *m,
 			      uint16_t nb_header_descs,
 			      uint16_t nb_payload_descs,
 			      size_t header_len, size_t payload_len,
+			      size_t outer_iph_off, size_t outer_udph_off,
 			      size_t iph_off, size_t tcph_off,
 			      efx_oword_t *tx_desc)
 {
 	efx_oword_t tx_desc_extra_fields;
+	int ed_outer_udp_len = (outer_udph_off != 0) ? 1 : 0;
+	int ed_outer_ip_len = (outer_iph_off != 0) ? 1 : 0;
+	int ed_outer_ip_id = (outer_iph_off != 0) ?
+		ESE_GZ_TX_DESC_IP4_ID_INC_MOD16 : 0;
 	/*
 	 * If no tunnel encapsulation is present, then the ED_INNER
 	 * fields should be used.
 	 */
 	int ed_inner_ip_id = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
+	uint8_t inner_l3 = sfc_ef100_tx_qdesc_cso_inner_l3(
+					m->ol_flags & PKT_TX_TUNNEL_MASK);
 
-	EFX_POPULATE_OWORD_7(*tx_desc,
+	EFX_POPULATE_OWORD_10(*tx_desc,
 		ESF_GZ_TX_TSO_MSS, m->tso_segsz,
 		ESF_GZ_TX_TSO_HDR_NUM_SEGS, nb_header_descs,
 		ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS, nb_payload_descs,
+		ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, ed_outer_ip_id,
 		ESF_GZ_TX_TSO_ED_INNER_IP4_ID, ed_inner_ip_id,
+		ESF_GZ_TX_TSO_ED_OUTER_IP_LEN, ed_outer_ip_len,
 		ESF_GZ_TX_TSO_ED_INNER_IP_LEN, 1,
+		ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN, ed_outer_udp_len,
 		ESF_GZ_TX_TSO_HDR_LEN_W, header_len >> 1,
 		ESF_GZ_TX_TSO_PAYLOAD_LEN, payload_len);
 
-	EFX_POPULATE_OWORD_5(tx_desc_extra_fields,
+	EFX_POPULATE_OWORD_9(tx_desc_extra_fields,
+		/*
+		 * Outer offsets are required for outer IPv4 ID
+		 * and length edits in the case of tunnel TSO.
+		 */
+		ESF_GZ_TX_TSO_OUTER_L3_OFF_W, outer_iph_off >> 1,
+		ESF_GZ_TX_TSO_OUTER_L4_OFF_W, outer_udph_off >> 1,
 		/*
 		 * Inner offsets are required for inner IPv4 ID
-		 * and IP length edits.
+		 * and IP length edits and partial checksum
+		 * offload in the case of tunnel TSO.
 		 */
 		ESF_GZ_TX_TSO_INNER_L3_OFF_W, iph_off >> 1,
 		ESF_GZ_TX_TSO_INNER_L4_OFF_W, tcph_off >> 1,
+		ESF_GZ_TX_TSO_CSO_INNER_L4,
+			inner_l3 != ESE_GZ_TX_DESC_CS_INNER_L3_OFF,
+		ESF_GZ_TX_TSO_CSO_INNER_L3, inner_l3,
 		/*
 		 * Use outer full checksum offloads which do
 		 * not require any extra information.
@@ -418,6 +477,14 @@ sfc_ef100_tx_qdesc_tso_create(const struct rte_mbuf *m,
 		ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_TSO);
 
 	EFX_OR_OWORD(*tx_desc, tx_desc_extra_fields);
+
+	if (m->ol_flags & PKT_TX_VLAN_PKT) {
+		EFX_POPULATE_OWORD_2(tx_desc_extra_fields,
+				ESF_GZ_TX_TSO_VLAN_INSERT_EN, 1,
+				ESF_GZ_TX_TSO_VLAN_INSERT_TCI, m->vlan_tci);
+
+		EFX_OR_OWORD(*tx_desc, tx_desc_extra_fields);
+	}
 }
 
 static inline void
@@ -436,6 +503,7 @@ sfc_ef100_tx_qpush(struct sfc_ef100_txq *txq, unsigned int added)
 	 * operations that follow it (i.e. doorbell write).
 	 */
 	rte_write32(dword.ed_u32[0], txq->doorbell);
+	txq->dp.dpq.tx_dbells++;
 
 	sfc_ef100_tx_debug(txq, "TxQ pushed doorbell at pidx %u (added=%u)",
 			   EFX_DWORD_FIELD(dword, ERF_GZ_TX_RING_PIDX),
@@ -471,6 +539,11 @@ sfc_ef100_tx_pkt_descs_max(const struct rte_mbuf *m)
 					SFC_MBUF_SEG_LEN_MAX));
 	}
 
+	if (m->ol_flags & sfc_dp_mport_override) {
+		/* Tx override prefix descriptor will be used */
+		extra_descs++;
+	}
+
 	/*
 	 * Any segment of scattered packet cannot be bigger than maximum
 	 * segment length. Make sure that subsequent segments do not need
@@ -491,12 +564,21 @@ sfc_ef100_xmit_tso_pkt(struct sfc_ef100_txq * const txq,
 	unsigned int seg_split = 0;
 	unsigned int tso_desc_id;
 	unsigned int id;
+	size_t outer_iph_off;
+	size_t outer_udph_off;
 	size_t iph_off;
 	size_t tcph_off;
 	size_t header_len;
 	size_t remaining_hdr_len;
 
-	iph_off = m->l2_len;
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
+		outer_iph_off = m->outer_l2_len;
+		outer_udph_off = outer_iph_off + m->outer_l3_len;
+	} else {
+		outer_iph_off = 0;
+		outer_udph_off = 0;
+	}
+	iph_off = outer_udph_off + m->l2_len;
 	tcph_off = iph_off + m->l3_len;
 	header_len = tcph_off + m->l4_len;
 
@@ -550,6 +632,7 @@ sfc_ef100_xmit_tso_pkt(struct sfc_ef100_txq * const txq,
 
 	sfc_ef100_tx_qdesc_tso_create(m, nb_hdr_descs, nb_pld_descs, header_len,
 				      rte_pktmbuf_pkt_len(m) - header_len,
+				      outer_iph_off, outer_udph_off,
 				      iph_off, tcph_off,
 				      &txq->txq_hw_ring[tso_desc_id]);
 
@@ -607,6 +690,12 @@ sfc_ef100_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			break;
 		}
 
+		if (m_seg->ol_flags & sfc_dp_mport_override) {
+			id = added++ & txq->ptr_mask;
+			sfc_ef100_tx_qdesc_prefix_create(m_seg,
+							 &txq->txq_hw_ring[id]);
+		}
+
 		if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
 			m_seg = sfc_ef100_xmit_tso_pkt(txq, m_seg, &added);
 		} else {
@@ -646,6 +735,9 @@ sfc_ef100_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		}
 
 		dma_desc_space -= (added - pkt_start);
+
+		sfc_pkts_bytes_add(&txq->dp.dpq.stats, 1,
+				   rte_pktmbuf_pkt_len(*pktp));
 	}
 
 	if (likely(added != txq->added)) {
@@ -876,15 +968,19 @@ struct sfc_dp_tx sfc_ef100_tx = {
 		.type		= SFC_DP_TX,
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF100,
 	},
-	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
+	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS |
+				  SFC_DP_TX_FEAT_STATS,
 	.dev_offload_capa	= 0,
-	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
+	.queue_offload_capa	= DEV_TX_OFFLOAD_VLAN_INSERT |
+				  DEV_TX_OFFLOAD_IPV4_CKSUM |
 				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
 				  DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |
 				  DEV_TX_OFFLOAD_UDP_CKSUM |
 				  DEV_TX_OFFLOAD_TCP_CKSUM |
 				  DEV_TX_OFFLOAD_MULTI_SEGS |
-				  DEV_TX_OFFLOAD_TCP_TSO,
+				  DEV_TX_OFFLOAD_TCP_TSO |
+				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
 	.get_dev_info		= sfc_ef100_get_dev_info,
 	.qsize_up_rings		= sfc_ef100_tx_qsize_up_rings,
 	.qcreate		= sfc_ef100_tx_qcreate,
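
The tunnel-aware header_len computation in sfc_ef100_tx_prepare_pkt_tso() above follows the standard DPDK mbuf convention for tunnel TSO: outer_l2_len and outer_l3_len describe the outer Ethernet and IP headers, while l2_len covers everything from the outer L4 header up to and including the inner Ethernet header. A minimal sketch of how a caller might fill in an mbuf for VXLAN TSO (the helper name and header sizes are illustrative, not part of the patch):

#include <rte_mbuf.h>

/*
 * Illustrative helper: mark an IPv4/UDP/VXLAN-encapsulated TCP packet
 * for tunnel TSO. GENEVE is analogous with PKT_TX_TUNNEL_GENEVE.
 */
static void
example_prep_vxlan_tso(struct rte_mbuf *m, uint16_t mss)
{
	m->outer_l2_len = 14;		/* outer Ethernet */
	m->outer_l3_len = 20;		/* outer IPv4, no options */
	m->l2_len = 8 + 8 + 14;		/* outer UDP + VXLAN + inner Ethernet */
	m->l3_len = 20;			/* inner IPv4, no options */
	m->l4_len = 20;			/* inner TCP, no options */
	m->tso_segsz = mss;

	m->ol_flags |= PKT_TX_TUNNEL_VXLAN | PKT_TX_TCP_SEG |
		       PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
		       PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}

With this layout, header_len in the prepare callback is 14 + 20 + 30 + 20 + 20 = 104 bytes, and the debug-build switch accepts the packet because PKT_TX_TUNNEL_VXLAN is one of the two supported encapsulations.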
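
The VLAN insertion added here is requested per packet, but since DEV_TX_OFFLOAD_VLAN_INSERT is advertised in queue_offload_capa, it must also be enabled on the Tx queue. A sketch under those assumptions (function names are illustrative):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Hypothetical setup: enable HW VLAN insertion on Tx queue 0 of a port. */
static int
example_setup_vlan_insert(uint16_t port_id, uint16_t nb_desc,
			  unsigned int socket_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	int rc;

	rc = rte_eth_dev_info_get(port_id, &dev_info);
	if (rc != 0)
		return rc;

	txconf = dev_info.default_txconf;
	txconf.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;

	return rte_eth_tx_queue_setup(port_id, 0, nb_desc, socket_id, &txconf);
}

/* Per packet: request insertion of TCI 0x6064 (PCP 3, VID 100). */
static void
example_mark_vlan(struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_VLAN_PKT;
	m->vlan_tci = (3 << 13) | 100;
}

The TCI set here is what sfc_ef100_tx_qdesc_send_create() and sfc_ef100_tx_qdesc_tso_create() copy into the VLAN_INSERT_TCI descriptor field.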
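
sfc_ef100_tx_qdesc_prefix_create() reads an mport ID from an mbuf dynamic field and the Tx path gates on the sfc_dp_mport_override dynamic flag. The actual registration lives elsewhere in the driver; a sketch of how such a field/flag pair could be registered with the generic mbuf dynamic field API (the field/flag names and registration site are assumptions):

#include <stdint.h>

#include <rte_errno.h>
#include <rte_mbuf_dyn.h>

#include "efx.h"	/* efx_mport_id_t from the driver's base code */

int sfc_dp_mport_offset = -1;
uint64_t sfc_dp_mport_override = 0;

static int
example_register_mport_dyn(void)
{
	static const struct rte_mbuf_dynfield field_desc = {
		.name = "sfc_dp_mport",			/* assumed name */
		.size = sizeof(efx_mport_id_t),
		.align = __alignof__(efx_mport_id_t),
	};
	static const struct rte_mbuf_dynflag flag_desc = {
		.name = "sfc_dp_mport_override",	/* assumed name */
	};
	int offset;
	int bitnum;

	offset = rte_mbuf_dynfield_register(&field_desc);
	if (offset < 0)
		return rte_errno;

	bitnum = rte_mbuf_dynflag_register(&flag_desc);
	if (bitnum < 0)
		return rte_errno;

	sfc_dp_mport_offset = offset;
	sfc_dp_mport_override = UINT64_C(1) << bitnum;
	return 0;
}

A sender then stores the target mport in the dynamic field and sets the flag in ol_flags before the Tx burst; note that sfc_ef100_tx_pkt_descs_max() reserves one extra descriptor for the override prefix in that case.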
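
The tx_dbells counter and the per-packet sfc_pkts_bytes_add() accounting enabled by SFC_DP_TX_FEAT_STATS surface through the ethdev extended statistics API; the exact stat names are driver-defined, so discover them at run time. A generic readout sketch:

#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

#include <rte_ethdev.h>

/* Sketch: dump all xstats of a port, including the driver's per-queue
 * Tx packet/byte and doorbell counters (names are driver-defined).
 */
static int
example_dump_xstats(uint16_t port_id)
{
	struct rte_eth_xstat *xstats;
	struct rte_eth_xstat_name *names;
	int n, i;

	n = rte_eth_xstats_get(port_id, NULL, 0);
	if (n <= 0)
		return n;

	xstats = malloc(sizeof(*xstats) * n);
	names = malloc(sizeof(*names) * n);
	if (xstats == NULL || names == NULL) {
		free(xstats);
		free(names);
		return -ENOMEM;
	}

	(void)rte_eth_xstats_get_names(port_id, names, n);
	(void)rte_eth_xstats_get(port_id, xstats, n);

	for (i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n",
		       names[xstats[i].id].name, xstats[i].value);

	free(xstats);
	free(names);
	return 0;
}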