net/sfc: add header segments check for EF100 Tx
author    Ivan Malov <ivan.malov@oktetlabs.ru>
          Tue, 13 Oct 2020 13:45:38 +0000 (14:45 +0100)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Fri, 16 Oct 2020 17:48:18 +0000 (19:48 +0200)
EF100 native Tx datapath demands that the packet header be
contiguous when partial checksum offloads are used, since a
helper function is used to calculate the pseudo-header checksum
and that function requires a contiguous header.

Add an explicit check for this assumption and restructure the
code to skip the TSO header linearisation check, since TSO header
linearisation is not done on the EF100 native Tx datapath.

Signed-off-by: Ivan Malov <ivan.malov@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
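
For illustration only (not part of the patch): a minimal sketch of why
the pseudo-header checksum helper needs a contiguous header. The DPDK
helper rte_ipv4_phdr_cksum() reads the IPv4 header through a plain
pointer, so the whole header must live in the first mbuf segment; the
function name below is hypothetical.

#include <rte_ip.h>
#include <rte_mbuf.h>

/*
 * Hypothetical helper (illustration only): compute the IPv4 TCP/UDP
 * pseudo-header checksum for a packet whose l2/l3/l4 lengths are set
 * in the mbuf. Returns -EINVAL when the header does not fit in the
 * first (contiguous) segment, which is the case the patch now rejects.
 */
static int
example_phdr_cksum(struct rte_mbuf *m, uint16_t *cksum)
{
	unsigned int hdr_len = m->l2_len + m->l3_len + m->l4_len;
	const struct rte_ipv4_hdr *ip4;

	/* rte_ipv4_phdr_cksum() dereferences the header directly,
	 * so the header must be contiguous in the first segment. */
	if (rte_pktmbuf_data_len(m) < hdr_len)
		return -EINVAL;

	ip4 = rte_pktmbuf_mtod_offset(m, const struct rte_ipv4_hdr *,
				      m->l2_len);
	*cksum = rte_ipv4_phdr_cksum(ip4, m->ol_flags);
	return 0;
}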
drivers/net/sfc/sfc_dp_tx.h
drivers/net/sfc/sfc_ef100_tx.c
drivers/net/sfc/sfc_ef10_tx.c
drivers/net/sfc/sfc_tx.c

diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h
index 67aa398..bed8ce8 100644
@@ -206,14 +206,38 @@ sfc_dp_tx_offload_capa(const struct sfc_dp_tx *dp_tx)
        return dp_tx->dev_offload_capa | dp_tx->queue_offload_capa;
 }
 
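+/*
+ * Count the header segments beyond the first one: walk the mbuf chain
+ * while the header fully consumes each segment. On return, *m_seg
+ * points to the segment holding the tail of the header and
+ * *header_len_remaining is the number of header bytes within it.
+ */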
+static inline unsigned int
+sfc_dp_tx_pkt_extra_hdr_segs(struct rte_mbuf **m_seg,
+                            unsigned int *header_len_remaining)
+{
+       unsigned int nb_extra_header_segs = 0;
+
+       while (rte_pktmbuf_data_len(*m_seg) < *header_len_remaining) {
+               *header_len_remaining -= rte_pktmbuf_data_len(*m_seg);
+               *m_seg = (*m_seg)->next;
+               ++nb_extra_header_segs;
+       }
+
+       return nb_extra_header_segs;
+}
+
 static inline int
 sfc_dp_tx_prepare_pkt(struct rte_mbuf *m,
+                          unsigned int max_nb_header_segs,
+                          unsigned int tso_bounce_buffer_len,
                           uint32_t tso_tcp_header_offset_limit,
                           unsigned int max_fill_level,
                           unsigned int nb_tso_descs,
                           unsigned int nb_vlan_descs)
 {
        unsigned int descs_required = m->nb_segs;
+       unsigned int tcph_off = ((m->ol_flags & PKT_TX_TUNNEL_MASK) ?
+                                m->outer_l2_len + m->outer_l3_len : 0) +
+                               m->l2_len + m->l3_len;
+       unsigned int header_len = tcph_off + m->l4_len;
+       unsigned int header_len_remaining = header_len;
+       unsigned int nb_header_segs = 1;
+       struct rte_mbuf *m_seg = m;
 
 #ifdef RTE_LIBRTE_SFC_EFX_DEBUG
        int ret;
@@ -229,10 +253,29 @@ sfc_dp_tx_prepare_pkt(struct rte_mbuf *m,
        }
 #endif
 
-       if (m->ol_flags & PKT_TX_TCP_SEG) {
-               unsigned int tcph_off = m->l2_len + m->l3_len;
-               unsigned int header_len;
+       if (max_nb_header_segs != 0) {
+               /* There is a limit on the number of header segments. */
 
+               nb_header_segs +=
+                   sfc_dp_tx_pkt_extra_hdr_segs(&m_seg,
+                                                &header_len_remaining);
+
+               if (unlikely(nb_header_segs > max_nb_header_segs)) {
+                       /*
+                        * The number of header segments is too large.
+                        *
+                        * If TSO is requested and if the datapath supports
+                        * linearisation of TSO headers, allow the packet
+                        * to proceed with additional checks below.
+                        * Otherwise, return an error.
+                        */
+                       if ((m->ol_flags & PKT_TX_TCP_SEG) == 0 ||
+                           tso_bounce_buffer_len == 0)
+                               return EINVAL;
+               }
+       }
+
+       if (m->ol_flags & PKT_TX_TCP_SEG) {
                switch (m->ol_flags & PKT_TX_TUNNEL_MASK) {
                case 0:
                        break;
@@ -242,30 +285,38 @@ sfc_dp_tx_prepare_pkt(struct rte_mbuf *m,
                        if (!(m->ol_flags &
                              (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
                                return EINVAL;
-
-                       tcph_off += m->outer_l2_len + m->outer_l3_len;
                }
 
-               header_len = tcph_off + m->l4_len;
-
                if (unlikely(tcph_off > tso_tcp_header_offset_limit))
                        return EINVAL;
 
                descs_required += nb_tso_descs;
 
                /*
-                * Extra descriptor that is required when a packet header
-                * is separated from remaining content of the first segment.
+                * If the header segments have already been counted
+                * above, nothing is done here since the remaining
+                * header length is smaller than the current segment
+                * size.
+                */
+               nb_header_segs +=
+                   sfc_dp_tx_pkt_extra_hdr_segs(&m_seg,
+                                                &header_len_remaining);
+
+               /*
+                * Extra descriptor that is required when (a part of)
+                * the payload shares the same segment with (a part of)
+                * the header.
                 */
-               if (rte_pktmbuf_data_len(m) > header_len) {
+               if (rte_pktmbuf_data_len(m_seg) > header_len_remaining)
                        descs_required++;
-               } else if (rte_pktmbuf_data_len(m) < header_len &&
-                        unlikely(header_len > SFC_TSOH_STD_LEN)) {
-                       /*
-                        * Header linearization is required and
-                        * the header is too big to be linearized
-                        */
-                       return EINVAL;
+
+               if (tso_bounce_buffer_len != 0) {
+                       if (nb_header_segs > 1 &&
+                           unlikely(header_len > tso_bounce_buffer_len)) {
+                               /*
+                                * Header linearisation is required and
+                                * the header is too big to be linearised.
+                                */
+                               return EINVAL;
+                       }
                }
        }
 
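
To see the new accounting in action, here is a self-contained model of
the segment walk (toy types standing in for rte_mbuf; illustration
only, not driver code):

#include <stdio.h>

/* Toy stand-in for an mbuf chain: each node is one data segment. */
struct seg {
	unsigned int data_len;
	struct seg *next;
};

/*
 * Mirror of sfc_dp_tx_pkt_extra_hdr_segs(): count the segments that
 * the header fully occupies beyond the first one, advancing *s to the
 * segment that holds the header tail.
 */
static unsigned int
extra_hdr_segs(struct seg **s, unsigned int *hdr_remaining)
{
	unsigned int extra = 0;

	while ((*s)->data_len < *hdr_remaining) {
		*hdr_remaining -= (*s)->data_len;
		*s = (*s)->next;
		++extra;
	}
	return extra;
}

int
main(void)
{
	/* 54-byte header (14 + 20 + 20) spread over 14 + 20 + 1000. */
	struct seg s2 = { 1000, NULL };
	struct seg s1 = { 20, &s2 };
	struct seg s0 = { 14, &s1 };
	struct seg *cur = &s0;
	unsigned int remaining = 54;
	unsigned int nb_header_segs = 1 + extra_hdr_segs(&cur, &remaining);

	/*
	 * Prints "3 header segments, 20 header bytes in the tail":
	 * the third segment also carries payload, hence the extra
	 * descriptor counted in sfc_dp_tx_prepare_pkt().
	 */
	printf("%u header segments, %u header bytes in the tail\n",
	       nb_header_segs, remaining);
	return 0;
}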
diff --git a/drivers/net/sfc/sfc_ef100_tx.c b/drivers/net/sfc/sfc_ef100_tx.c
index 96a9b6c..7961fa8 100644
@@ -95,9 +95,11 @@ sfc_ef100_tx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
        for (i = 0; i < nb_pkts; i++) {
                struct rte_mbuf *m = tx_pkts[i];
+               unsigned int max_nb_header_segs = 0;
                int ret;
 
-               ret = sfc_dp_tx_prepare_pkt(m, 0, txq->max_fill_level, 0, 0);
+               ret = sfc_dp_tx_prepare_pkt(m, max_nb_header_segs, 0,
+                                           0, txq->max_fill_level, 0, 0);
                if (unlikely(ret != 0)) {
                        rte_errno = ret;
                        break;
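
In this hunk max_nb_header_segs is still 0 (no limit). The sketch
below shows, as an assumption rather than a quote from the patch, how
an EF100-style caller could derive the limit from the requested
offloads before calling the updated sfc_dp_tx_prepare_pkt(); the exact
flag set checked is hypothetical.

/*
 * ASSUMPTION (illustration only): a datapath that needs a contiguous
 * header for partial checksum offloads would cap headers at a single
 * segment.
 */
unsigned int max_nb_header_segs = 0;

if (m->ol_flags & (PKT_TX_L4_MASK | PKT_TX_OUTER_IP_CKSUM))
	max_nb_header_segs = 1;

/* EF100 native Tx does no TSO header linearisation, so the bounce
 * buffer length passed is 0. */
ret = sfc_dp_tx_prepare_pkt(m, max_nb_header_segs,
			    0 /* tso_bounce_buffer_len */,
			    0 /* tso_tcp_header_offset_limit */,
			    txq->max_fill_level,
			    0 /* nb_tso_descs */,
			    0 /* nb_vlan_descs */);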
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index 6fb4ac8..961689d 100644
@@ -352,7 +352,7 @@ sfc_ef10_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        }
                }
 #endif
-               ret = sfc_dp_tx_prepare_pkt(m,
+               ret = sfc_dp_tx_prepare_pkt(m, 0, SFC_TSOH_STD_LEN,
                                txq->tso_tcp_header_offset_limit,
                                txq->max_fill_level,
                                SFC_EF10_TSO_OPT_DESCS_NUM, 0);
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 4ea6148..d50d49c 100644
@@ -718,7 +718,7 @@ sfc_efx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 * insertion offload is requested regardless the offload
                 * requested/supported.
                 */
-               ret = sfc_dp_tx_prepare_pkt(tx_pkts[i],
+               ret = sfc_dp_tx_prepare_pkt(tx_pkts[i], 0, SFC_TSOH_STD_LEN,
                                encp->enc_tx_tso_tcp_header_offset_limit,
                                txq->max_fill_level, EFX_TX_FATSOV2_OPT_NDESCS,
                                1);
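
The EF10 and EFX datapaths, by contrast, pass SFC_TSOH_STD_LEN as the
bounce buffer length and 0 (no limit) for max_nb_header_segs, so a
fragmented TSO header is accepted as long as it can be linearised. A
simplified restatement of the resulting acceptance rule for a TSO
packet (illustration only, mirroring the checks added in sfc_dp_tx.h
above):

#include <stdbool.h>

static bool
tso_header_acceptable(unsigned int nb_header_segs, unsigned int header_len,
		      unsigned int max_nb_header_segs,
		      unsigned int tso_bounce_buffer_len)
{
	/*
	 * A segment-count limit is fatal only when the datapath cannot
	 * fall back to header linearisation (no bounce buffer).
	 */
	if (max_nb_header_segs != 0 && nb_header_segs > max_nb_header_segs &&
	    tso_bounce_buffer_len == 0)
		return false;

	/*
	 * Linearisation path: a fragmented header must fit into the
	 * bounce buffer (SFC_TSOH_STD_LEN on the EF10/EFX datapaths).
	 */
	if (tso_bounce_buffer_len != 0 && nb_header_segs > 1 &&
	    header_len > tso_bounce_buffer_len)
		return false;

	return true;
}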