build: fix soname info for 19.11 compatibility
[dpdk.git] drivers/net/sfc/sfc_ef10_tx.c
index 05f30cb..43e3447 100644
@@ -320,9 +320,10 @@ sfc_ef10_try_reap(struct sfc_ef10_txq * const txq, unsigned int added,
 }
 
 static uint16_t
-sfc_ef10_prepare_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+sfc_ef10_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                      uint16_t nb_pkts)
 {
+       struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
        uint16_t i;
 
        for (i = 0; i < nb_pkts; i++) {
@@ -347,7 +348,10 @@ sfc_ef10_prepare_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                        }
                }
 #endif
-               ret = sfc_dp_tx_prepare_pkt(m);
+               ret = sfc_dp_tx_prepare_pkt(m,
+                               txq->tso_tcp_header_offset_limit,
+                               txq->max_fill_level,
+                               SFC_EF10_TSO_OPT_DESCS_NUM, 0);
                if (unlikely(ret != 0)) {
                        rte_errno = ret;
                        break;
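
Passing the queue limits into sfc_dp_tx_prepare_pkt() lets one generic
helper validate both the TCP header offset for TSO and the descriptor
space a packet will consume. A minimal sketch of the kind of checks such
a helper can perform, with illustrative names and error codes (this is
not the actual sfc_dp_tx_prepare_pkt() body):

    /*
     * Minimal sketch of per-packet prepare checks; the names and the
     * exact error codes are illustrative.
     */
    static int
    example_prepare_pkt(struct rte_mbuf *m,
                        uint32_t tso_tcp_header_offset_limit,
                        unsigned int max_fill_level,
                        unsigned int nb_tso_descs,
                        unsigned int nb_vlan_descs)
    {
            unsigned int descs_required = m->nb_segs + nb_vlan_descs;

            if (m->ol_flags & PKT_TX_TCP_SEG) {
                    /* Outer headers precede the inner TCP header */
                    size_t tcph_off =
                            ((m->ol_flags & PKT_TX_TUNNEL_MASK) ?
                             m->outer_l2_len + m->outer_l3_len : 0) +
                            m->l2_len + m->l3_len;

                    if (unlikely(tcph_off > tso_tcp_header_offset_limit))
                            return EINVAL;

                    /* TSO consumes extra option descriptors */
                    descs_required += nb_tso_descs;
            }

            /* The whole packet must fit into the Tx ring at once */
            if (unlikely(descs_required > max_fill_level))
                    return ENOBUFS;

            return 0;
    }
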
@@ -362,13 +366,16 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
                      unsigned int *added, unsigned int *dma_desc_space,
                      bool *reap_done)
 {
-       size_t iph_off = m_seg->l2_len;
-       size_t tcph_off = m_seg->l2_len + m_seg->l3_len;
-       size_t header_len = m_seg->l2_len + m_seg->l3_len + m_seg->l4_len;
+       size_t iph_off = ((m_seg->ol_flags & PKT_TX_TUNNEL_MASK) ?
+                         m_seg->outer_l2_len + m_seg->outer_l3_len : 0) +
+                        m_seg->l2_len;
+       size_t tcph_off = iph_off + m_seg->l3_len;
+       size_t header_len = tcph_off + m_seg->l4_len;
        /* Offset of the payload in the last segment that contains the header */
        size_t in_off = 0;
-       const struct tcp_hdr *th;
+       const struct rte_tcp_hdr *th;
        uint16_t packet_id = 0;
+       uint16_t outer_packet_id = 0;
        uint32_t sent_seq;
        uint8_t *hdr_addr;
        rte_iova_t hdr_iova;
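
For tunnel packets the inner IP header no longer starts right after
l2_len: the outer Ethernet and IP headers are counted in first. A worked
example under the DPDK mbuf length conventions (the numbers are
illustrative):

    /*
     * Illustrative VXLAN/TCP/IPv4 layout for the offsets above. For
     * tunnel packets l2_len covers outer L4 + tunnel + inner L2:
     *   outer_l2_len = 14          (outer Ethernet)
     *   outer_l3_len = 20          (outer IPv4)
     *   l2_len       = 8 + 8 + 14  (outer UDP + VXLAN + inner Ethernet)
     *   l3_len       = 20          (inner IPv4)
     *   l4_len       = 20          (inner TCP)
     * giving iph_off = 64, tcph_off = 84 and header_len = 104.
     */
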
@@ -378,9 +385,6 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
        struct rte_mbuf *m_seg_to_free_up_to = first_m_seg;
        bool eop;
 
-       if (unlikely(tcph_off > txq->tso_tcp_header_offset_limit))
-               return EMSGSIZE;
-
        /*
         * Preliminary estimation of required DMA descriptors, including extra
         * descriptor for TSO header that is needed when the header is
@@ -446,6 +450,8 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
                /*
                 * Discard a packet if header linearization is needed but
                 * the header is too big.
+                * Duplicate the Tx prepare check here to avoid memory
+                * corruption if Tx prepare is skipped.
                 */
                if (unlikely(header_len > SFC_TSOH_STD_LEN))
                        return EMSGSIZE;
@@ -476,20 +482,19 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
         * filled in for a TSO mbuf. Use zero IPID if there is no IPv4 flag.
         * If the packet is still IPv4, HW will simply start from zero IPID.
         */
-       if (first_m_seg->ol_flags & PKT_TX_IPV4) {
-               const struct ipv4_hdr *iphe4;
+       if (first_m_seg->ol_flags & PKT_TX_IPV4)
+               packet_id = sfc_tso_ip4_get_ipid(hdr_addr, iph_off);
 
-               iphe4 = (const struct ipv4_hdr *)(hdr_addr + iph_off);
-               rte_memcpy(&packet_id, &iphe4->packet_id, sizeof(uint16_t));
-               packet_id = rte_be_to_cpu_16(packet_id);
-       }
+       if (first_m_seg->ol_flags & PKT_TX_OUTER_IPV4)
+               outer_packet_id = sfc_tso_ip4_get_ipid(hdr_addr,
+                                               first_m_seg->outer_l2_len);
 
-       th = (const struct tcp_hdr *)(hdr_addr + tcph_off);
+       th = (const struct rte_tcp_hdr *)(hdr_addr + tcph_off);
        rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
        sent_seq = rte_be_to_cpu_32(sent_seq);
 
-       sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, 0, sent_seq,
-                       first_m_seg->tso_segsz);
+       sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, outer_packet_id,
+                       sent_seq, first_m_seg->tso_segsz);
        (*added) += SFC_EF10_TSO_OPT_DESCS_NUM;
 
        sfc_ef10_tx_qdesc_dma_create(hdr_iova, header_len, false,
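
The open-coded rte_memcpy() read of the IPv4 packet ID is folded into
sfc_tso_ip4_get_ipid(), which is then reused for the outer header of
tunnel packets. A plausible sketch of such a helper (the real one lives
in the net/sfc TSO code and may differ in detail):

    /*
     * Plausible sketch of an IPID accessor; not the actual net/sfc
     * helper. The header may be unaligned, hence the copy instead of
     * a direct load.
     */
    static inline uint16_t
    example_tso_ip4_get_ipid(const uint8_t *pkt_hdr, size_t iph_off)
    {
            const struct rte_ipv4_hdr *iphe4;
            uint16_t ipid;

            iphe4 = (const struct rte_ipv4_hdr *)(pkt_hdr + iph_off);
            rte_memcpy(&ipid, &iphe4->packet_id, sizeof(ipid));
            return rte_be_to_cpu_16(ipid);
    }
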
@@ -750,6 +755,62 @@ sfc_ef10_simple_tx_reap(struct sfc_ef10_txq *txq)
                           txq->evq_read_ptr);
 }
 
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+static uint16_t
+sfc_ef10_simple_prepare_pkts(__rte_unused void *tx_queue,
+                            struct rte_mbuf **tx_pkts,
+                            uint16_t nb_pkts)
+{
+       uint16_t i;
+
+       for (i = 0; i < nb_pkts; i++) {
+               struct rte_mbuf *m = tx_pkts[i];
+               int ret;
+
+               ret = rte_validate_tx_offload(m);
+               if (unlikely(ret != 0)) {
+               /*
+                * rte_validate_tx_offload() returns negative error
+                * codes, but positive error codes are used inside
+                * the net/sfc PMD.
+                */
+                       SFC_ASSERT(ret < 0);
+                       rte_errno = -ret;
+                       break;
+               }
+
+               /* ef10_simple does not support TSO and VLAN insertion */
+               if (unlikely(m->ol_flags &
+                            (PKT_TX_TCP_SEG | PKT_TX_VLAN_PKT))) {
+                       rte_errno = ENOTSUP;
+                       break;
+               }
+
+               /* ef10_simple does not support scattered packets */
+               if (unlikely(m->nb_segs != 1)) {
+                       rte_errno = ENOTSUP;
+                       break;
+               }
+
+               /*
+                * ef10_simple requires mbuf fast free, which ignores
+                * reference counters
+                */
+               if (unlikely(rte_mbuf_refcnt_read(m) != 1)) {
+                       rte_errno = ENOTSUP;
+                       break;
+               }
+
+               /* ef10_simple requires single pool for all packets */
+               if (unlikely(m->pool != tx_pkts[0]->pool)) {
+                       rte_errno = ENOTSUP;
+                       break;
+               }
+       }
+
+       return i;
+}
+#endif
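
With pkt_prepare wired up (see the struct sfc_dp_tx update below), these
checks run through the standard ethdev prepare stage. A hedged
application-side sketch, where port_id, queue_id, tx_pkts and nb_pkts
are assumed to be set up elsewhere:

    /*
     * Illustrative application-side flow; not part of this patch.
     * rte_eth_tx_prepare() dispatches to the PMD's pkt_prepare
     * callback, so with RTE_LIBRTE_SFC_EFX_DEBUG the checks above
     * reject mbufs the simple datapath cannot handle before they
     * reach rte_eth_tx_burst().
     */
    uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
                                          tx_pkts, nb_pkts);

    if (nb_prep != nb_pkts)
            printf("mbuf %u rejected: %s\n", nb_prep,
                   rte_strerror(rte_errno));

    uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id,
                                        tx_pkts, nb_prep);
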
 
 static uint16_t
 sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -873,7 +934,9 @@ sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
        if (txq->sw_ring == NULL)
                goto fail_sw_ring_alloc;
 
-       if (info->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+       if (info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+                             DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+                             DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) {
                txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh",
                                              info->txq_entries,
                                              SFC_TSOH_STD_LEN,
@@ -1035,11 +1098,15 @@ struct sfc_dp_tx sfc_ef10_tx = {
                .type           = SFC_DP_TX,
                .hw_fw_caps     = SFC_DP_HW_FW_CAP_EF10,
        },
-       .features               = SFC_DP_TX_FEAT_TSO |
-                                 SFC_DP_TX_FEAT_MULTI_SEG |
-                                 SFC_DP_TX_FEAT_MULTI_POOL |
-                                 SFC_DP_TX_FEAT_REFCNT |
-                                 SFC_DP_TX_FEAT_MULTI_PROCESS,
+       .features               = SFC_DP_TX_FEAT_MULTI_PROCESS,
+       .dev_offload_capa       = DEV_TX_OFFLOAD_MULTI_SEGS,
+       .queue_offload_capa     = DEV_TX_OFFLOAD_IPV4_CKSUM |
+                                 DEV_TX_OFFLOAD_UDP_CKSUM |
+                                 DEV_TX_OFFLOAD_TCP_CKSUM |
+                                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+                                 DEV_TX_OFFLOAD_TCP_TSO |
+                                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+                                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
        .get_dev_info           = sfc_ef10_get_dev_info,
        .qsize_up_rings         = sfc_ef10_tx_qsize_up_rings,
        .qcreate                = sfc_ef10_tx_qcreate,
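
With the feature flags replaced by explicit device- and queue-level
offload capabilities, applications discover and enable tunnel TSO
through the usual ethdev path. A hedged sketch (variable names are
illustrative; tx_offload_capa is the device-level superset that also
includes the per-queue capabilities):

    /* Illustrative capability check; not part of this patch */
    struct rte_eth_dev_info dev_info;
    struct rte_eth_txconf txconf;

    rte_eth_dev_info_get(port_id, &dev_info);
    txconf = dev_info.default_txconf;

    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
            txconf.offloads |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;

    /* txconf.offloads is then passed to rte_eth_tx_queue_setup() */
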
@@ -1059,6 +1126,11 @@ struct sfc_dp_tx sfc_ef10_simple_tx = {
                .type           = SFC_DP_TX,
        },
        .features               = SFC_DP_TX_FEAT_MULTI_PROCESS,
+       .dev_offload_capa       = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
+       .queue_offload_capa     = DEV_TX_OFFLOAD_IPV4_CKSUM |
+                                 DEV_TX_OFFLOAD_UDP_CKSUM |
+                                 DEV_TX_OFFLOAD_TCP_CKSUM |
+                                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM,
        .get_dev_info           = sfc_ef10_get_dev_info,
        .qsize_up_rings         = sfc_ef10_tx_qsize_up_rings,
        .qcreate                = sfc_ef10_tx_qcreate,
@@ -1068,5 +1140,8 @@ struct sfc_dp_tx sfc_ef10_simple_tx = {
        .qstop                  = sfc_ef10_tx_qstop,
        .qreap                  = sfc_ef10_tx_qreap,
        .qdesc_status           = sfc_ef10_tx_qdesc_status,
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+       .pkt_prepare            = sfc_ef10_simple_prepare_pkts,
+#endif
        .pkt_burst              = sfc_ef10_simple_xmit_pkts,
 };