net/pfe: add device start/stop
diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h
index ebc9418..7105d27 100644
--- a/drivers/net/sfc/sfc_dp_tx.h
+++ b/drivers/net/sfc/sfc_dp_tx.h
@@ -14,6 +14,7 @@
 
 #include "sfc_dp.h"
 #include "sfc_debug.h"
+#include "sfc_tso.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -156,12 +157,17 @@ struct sfc_dp_tx {
        struct sfc_dp                   dp;
 
        unsigned int                    features;
-#define SFC_DP_TX_FEAT_VLAN_INSERT     0x1
-#define SFC_DP_TX_FEAT_TSO             0x2
-#define SFC_DP_TX_FEAT_MULTI_SEG       0x4
-#define SFC_DP_TX_FEAT_MULTI_PROCESS   0x8
-#define SFC_DP_TX_FEAT_MULTI_POOL      0x10
-#define SFC_DP_TX_FEAT_REFCNT          0x20
+#define SFC_DP_TX_FEAT_MULTI_PROCESS   0x1
+       /**
+        * Tx offload capabilities supported by the datapath on the
+        * device level only, provided that the HW/FW supports them.
+        */
+       uint64_t                        dev_offload_capa;
+       /**
+        * Tx offload capabilities supported by the datapath per queue,
+        * provided that the HW/FW supports them.
+        */
+       uint64_t                        queue_offload_capa;
        sfc_dp_tx_get_dev_info_t        *get_dev_info;
        sfc_dp_tx_qsize_up_rings_t      *qsize_up_rings;
        sfc_dp_tx_qcreate_t             *qcreate;
@@ -194,6 +200,12 @@ sfc_dp_find_tx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
 /** Get Tx datapath ops by the datapath TxQ handle */
 const struct sfc_dp_tx *sfc_dp_tx_by_dp_txq(const struct sfc_dp_txq *dp_txq);
 
+static inline uint64_t
+sfc_dp_tx_offload_capa(const struct sfc_dp_tx *dp_tx)
+{
+       return dp_tx->dev_offload_capa | dp_tx->queue_offload_capa;
+}
+
 static inline int
 sfc_dp_tx_prepare_pkt(struct rte_mbuf *m,
                           uint32_t tso_tcp_header_offset_limit,
@@ -219,7 +231,22 @@ sfc_dp_tx_prepare_pkt(struct rte_mbuf *m,
 
        if (m->ol_flags & PKT_TX_TCP_SEG) {
                unsigned int tcph_off = m->l2_len + m->l3_len;
-               unsigned int header_len = tcph_off + m->l4_len;
+               unsigned int header_len;
+
+               switch (m->ol_flags & PKT_TX_TUNNEL_MASK) {
+               case 0:
+                       break;
+               case PKT_TX_TUNNEL_VXLAN:
+                       /* FALLTHROUGH */
+               case PKT_TX_TUNNEL_GENEVE:
+                       if (!(m->ol_flags &
+                             (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
+                               return EINVAL;
+
+                       tcph_off += m->outer_l2_len + m->outer_l3_len;
+               }
+
+               header_len = tcph_off + m->l4_len;
 
                if (unlikely(tcph_off > tso_tcp_header_offset_limit))
                        return EINVAL;
@@ -230,8 +257,16 @@ sfc_dp_tx_prepare_pkt(struct rte_mbuf *m,
                 * Extra descriptor that is required when a packet header
                 * is separated from remaining content of the first segment.
                 */
-               if (rte_pktmbuf_data_len(m) > header_len)
+               if (rte_pktmbuf_data_len(m) > header_len) {
                        descs_required++;
+               } else if (rte_pktmbuf_data_len(m) < header_len &&
+                        unlikely(header_len > SFC_TSOH_STD_LEN)) {
+                       /*
+                        * Header linearization is required and
+                        * the header is too big to be linearized
+                        */
+                       return EINVAL;
+               }
        }
 
        /*
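
Note: the dev_offload_capa/queue_offload_capa split follows the ethdev offload model, in which the device-level capability set is a superset of the per-queue set. Below is a minimal sketch, not part of the patch, of how the two fields could be folded into struct rte_eth_dev_info via the new sfc_dp_tx_offload_capa() helper. The function name example_tx_dev_info() is hypothetical and used for illustration only; the real reporting in the sfc driver lives elsewhere and additionally masks these values by what the HW/FW actually supports.

#include <rte_ethdev.h>

#include "sfc_dp_tx.h"

/*
 * Sketch only: combine the datapath capability fields when filling
 * struct rte_eth_dev_info.  In the ethdev model, tx_offload_capa
 * must also include the per-queue capabilities.
 */
static void
example_tx_dev_info(const struct sfc_dp_tx *dp_tx,
		    struct rte_eth_dev_info *dev_info)
{
	dev_info->tx_offload_capa = sfc_dp_tx_offload_capa(dp_tx);
	dev_info->tx_queue_offload_capa = dp_tx->queue_offload_capa;
}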