diff --git a/drivers/net/octeontx2/otx2_tx.c b/drivers/net/octeontx2/otx2_tx.c
index 4458d8b..e951846 100644
--- a/drivers/net/octeontx2/otx2_tx.c
+++ b/drivers/net/octeontx2/otx2_tx.c
@@ -27,6 +27,7 @@ nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        struct otx2_eth_txq *txq = tx_queue; uint16_t i;
        const rte_iova_t io_addr = txq->io_addr;
        void *lmt_addr = txq->lmt_addr;
+       uint64_t lso_tun_fmt;
 
        NIX_XMIT_FC_OR_RETURN(txq, pkts);
 
@@ -34,6 +35,7 @@ nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
        /* Perform header writes before barrier for TSO */
        if (flags & NIX_TX_OFFLOAD_TSO_F) {
+               lso_tun_fmt = txq->lso_tun_fmt;
                for (i = 0; i < pkts; i++)
                        otx2_nix_xmit_prepare_tso(tx_pkts[i], flags);
        }
@@ -45,7 +47,7 @@ nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                rte_io_wmb();
 
        for (i = 0; i < pkts; i++) {
-               otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
+               otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
                /* Passing no of segdw as 4: HDR + EXT + SG + SMEM */
                otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
                                             tx_pkts[i]->ol_flags, 4, flags);
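The hunks above cache txq->lso_tun_fmt in a local once per burst and hand it to otx2_nix_xmit_prepare() as a new argument, so the per-packet helper no longer dereferences the queue structure for the tunnel LSO format table. Below is a minimal sketch of that hoisting pattern; the flag value, field layout and names are illustrative, not the driver's actual definitions. The same pattern is applied to nix_xmit_pkts_mseg() in the next hunk.

#include <stdint.h>

#define TSO_F (1u << 0)                 /* illustrative offload flag */

struct txq_sketch {
        uint64_t lso_tun_fmt;           /* packed per-tunnel-type LSO format ids */
};

/* Per-packet helper: receives the cached value as an argument instead
 * of reading it from the queue for every packet.
 */
static inline void
xmit_prepare_sketch(uint64_t *cmd, uint16_t flags, uint64_t lso_tun_fmt)
{
        if (flags & TSO_F) {
                /* hypothetical layout: one 8-bit format id per tunnel type */
                uint8_t fmt_id = (lso_tun_fmt >> 8) & 0xff;

                cmd[0] |= (uint64_t)fmt_id << 56;   /* hypothetical field */
        }
}

/* Burst loop: the queue field is read once, outside the loop. */
static void
xmit_burst_sketch(struct txq_sketch *txq, uint64_t (*cmds)[2],
                  uint16_t pkts, uint16_t flags)
{
        uint64_t lso_tun_fmt = 0;
        uint16_t i;

        if (flags & TSO_F)
                lso_tun_fmt = txq->lso_tun_fmt;

        for (i = 0; i < pkts; i++)
                xmit_prepare_sketch(cmds[i], flags, lso_tun_fmt);
}
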
@@ -65,6 +67,7 @@ nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
        struct otx2_eth_txq *txq = tx_queue; uint64_t i;
        const rte_iova_t io_addr = txq->io_addr;
        void *lmt_addr = txq->lmt_addr;
+       uint64_t lso_tun_fmt;
        uint16_t segdw;
 
        NIX_XMIT_FC_OR_RETURN(txq, pkts);
@@ -73,15 +76,20 @@ nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
 
        /* Perform header writes before barrier for TSO */
        if (flags & NIX_TX_OFFLOAD_TSO_F) {
+               lso_tun_fmt = txq->lso_tun_fmt;
                for (i = 0; i < pkts; i++)
                        otx2_nix_xmit_prepare_tso(tx_pkts[i], flags);
        }
 
+       /* Let's commit any changes in the packet here, as no further
+        * changes to the packet will be made unless fast free is disabled.
+        */
+       if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
+               rte_io_wmb();
+
        for (i = 0; i < pkts; i++) {
-               otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
+               otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
                segdw = otx2_nix_prepare_mseg(tx_pkts[i], cmd, flags);
-               /* Lets commit any changes in the packet */
-               rte_io_wmb();
                otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
                                             tx_pkts[i]->ol_flags, segdw,
                                             flags);
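This hunk also hoists the write barrier out of the per-packet loop: with fast free in use (NIX_TX_OFFLOAD_MBUF_NOFF_F clear) nothing modifies the packet after header preparation, so a single rte_io_wmb() before the loop suffices, while with fast free disabled the mbufs are still written inside the loop and the early barrier is skipped. A rough sketch of the conditional hoist, using an illustrative flag and a generic release fence as a stand-in for rte_io_wmb():

#include <stdint.h>

#define NOFF_F  (1u << 1)       /* illustrative "no fast free" flag */
#define wmb()   __atomic_thread_fence(__ATOMIC_RELEASE) /* stand-in barrier */

static void
mseg_burst_sketch(uint64_t (*cmds)[4], uint16_t pkts, uint16_t flags)
{
        uint16_t i;

        /* Fast free enabled (NOFF_F clear): descriptors are final after
         * preparation, so order the stores once for the whole burst.
         * With fast free disabled the mbufs are still written inside the
         * loop, so the early barrier is skipped (that path's ordering is
         * not shown in this sketch).
         */
        if (!(flags & NOFF_F))
                wmb();

        for (i = 0; i < pkts; i++)
                cmds[i][0] |= 1;        /* stand-in for descriptor writes */
}
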
@@ -194,7 +202,7 @@ nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
                        if (otx2_nix_prefree_seg(mbuf))
                                vsetq_lane_u64(0x80000, xmask01, 0);
                        else
-                               __mempool_check_cookies(mbuf->pool,
+                               RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool,
                                                        (void **)&mbuf,
                                                        1, 0);
 
@@ -203,7 +211,7 @@ nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
                        if (otx2_nix_prefree_seg(mbuf))
                                vsetq_lane_u64(0x80000, xmask01, 1);
                        else
-                               __mempool_check_cookies(mbuf->pool,
+                               RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool,
                                                        (void **)&mbuf,
                                                        1, 0);
 
@@ -212,7 +220,7 @@ nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
                        if (otx2_nix_prefree_seg(mbuf))
                                vsetq_lane_u64(0x80000, xmask23, 0);
                        else
-                               __mempool_check_cookies(mbuf->pool,
+                               RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool,
                                                        (void **)&mbuf,
                                                        1, 0);
 
@@ -221,7 +229,7 @@ nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
                        if (otx2_nix_prefree_seg(mbuf))
                                vsetq_lane_u64(0x80000, xmask23, 1);
                        else
-                               __mempool_check_cookies(mbuf->pool,
+                               RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool,
                                                        (void **)&mbuf,
                                                        1, 0);
                        senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
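The remaining mechanical change in these hunks is the rename of the internal __mempool_check_cookies() helper to the RTE_MEMPOOL_CHECK_COOKIES() macro. The macro expands to nothing unless the build defines RTE_LIBRTE_MEMPOOL_DEBUG; with debug enabled it validates the mempool cookies of the object, here the mbuf whose buffer the hardware will free on the driver's behalf. A small usage sketch (the wrapper function is made up for illustration):

#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Hypothetical helper mirroring the driver's use above: once the
 * driver decides the hardware may free the buffer itself, run the
 * mempool debug cookie check on it.  RTE_MEMPOOL_CHECK_COOKIES()
 * is a no-op unless RTE_LIBRTE_MEMPOOL_DEBUG is defined.
 */
static inline void
mark_mbuf_for_hw_free(struct rte_mbuf *m)
{
        RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
}
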
@@ -237,22 +245,22 @@ nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
                         */
                        mbuf = (struct rte_mbuf *)((uintptr_t)mbuf0 -
                                offsetof(struct rte_mbuf, buf_iova));
-                       __mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+                       RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf,
                                                1, 0);
 
                        mbuf = (struct rte_mbuf *)((uintptr_t)mbuf1 -
                                offsetof(struct rte_mbuf, buf_iova));
-                       __mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+                       RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf,
                                                1, 0);
 
                        mbuf = (struct rte_mbuf *)((uintptr_t)mbuf2 -
                                offsetof(struct rte_mbuf, buf_iova));
-                       __mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+                       RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf,
                                                1, 0);
 
                        mbuf = (struct rte_mbuf *)((uintptr_t)mbuf3 -
                                offsetof(struct rte_mbuf, buf_iova));
-                       __mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+                       RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf,
                                                1, 0);
                        RTE_SET_USED(mbuf);
                }
@@ -356,26 +364,26 @@ nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
                        const uint8x16_t tbl = {
                                /* [0-15] = il4type:il3type */
                                0x04, /* none (IPv6 assumed) */
-                               0x14, /* PKT_TX_TCP_CKSUM (IPv6 assumed) */
-                               0x24, /* PKT_TX_SCTP_CKSUM (IPv6 assumed) */
-                               0x34, /* PKT_TX_UDP_CKSUM (IPv6 assumed) */
-                               0x03, /* PKT_TX_IP_CKSUM */
-                               0x13, /* PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM */
-                               0x23, /* PKT_TX_IP_CKSUM | PKT_TX_SCTP_CKSUM */
-                               0x33, /* PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM */
-                               0x02, /* PKT_TX_IPV4  */
-                               0x12, /* PKT_TX_IPV4 | PKT_TX_TCP_CKSUM */
-                               0x22, /* PKT_TX_IPV4 | PKT_TX_SCTP_CKSUM */
-                               0x32, /* PKT_TX_IPV4 | PKT_TX_UDP_CKSUM */
-                               0x03, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM */
-                               0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
-                                      * PKT_TX_TCP_CKSUM
+                               0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6 assumed) */
+                               0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6 assumed) */
+                               0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6 assumed) */
+                               0x03, /* RTE_MBUF_F_TX_IP_CKSUM */
+                               0x13, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM */
+                               0x23, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_SCTP_CKSUM */
+                               0x33, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM */
+                               0x02, /* RTE_MBUF_F_TX_IPV4  */
+                               0x12, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_TCP_CKSUM */
+                               0x22, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_SCTP_CKSUM */
+                               0x32, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_UDP_CKSUM */
+                               0x03, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM */
+                               0x13, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+                                      * RTE_MBUF_F_TX_TCP_CKSUM
                                       */
-                               0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
-                                      * PKT_TX_SCTP_CKSUM
+                               0x23, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+                                      * RTE_MBUF_F_TX_SCTP_CKSUM
                                       */
-                               0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
-                                      * PKT_TX_UDP_CKSUM
+                               0x33, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+                                      * RTE_MBUF_F_TX_UDP_CKSUM
                                       */
                        };
 
@@ -647,40 +655,40 @@ nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
                                {
                                        /* [0-15] = il4type:il3type */
                                        0x04, /* none (IPv6) */
-                                       0x14, /* PKT_TX_TCP_CKSUM (IPv6) */
-                                       0x24, /* PKT_TX_SCTP_CKSUM (IPv6) */
-                                       0x34, /* PKT_TX_UDP_CKSUM (IPv6) */
-                                       0x03, /* PKT_TX_IP_CKSUM */
-                                       0x13, /* PKT_TX_IP_CKSUM |
-                                              * PKT_TX_TCP_CKSUM
+                                       0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6) */
+                                       0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6) */
+                                       0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6) */
+                                       0x03, /* RTE_MBUF_F_TX_IP_CKSUM */
+                                       0x13, /* RTE_MBUF_F_TX_IP_CKSUM |
+                                              * RTE_MBUF_F_TX_TCP_CKSUM
                                               */
-                                       0x23, /* PKT_TX_IP_CKSUM |
-                                              * PKT_TX_SCTP_CKSUM
+                                       0x23, /* RTE_MBUF_F_TX_IP_CKSUM |
+                                              * RTE_MBUF_F_TX_SCTP_CKSUM
                                               */
-                                       0x33, /* PKT_TX_IP_CKSUM |
-                                              * PKT_TX_UDP_CKSUM
+                                       0x33, /* RTE_MBUF_F_TX_IP_CKSUM |
+                                              * RTE_MBUF_F_TX_UDP_CKSUM
                                               */
-                                       0x02, /* PKT_TX_IPV4 */
-                                       0x12, /* PKT_TX_IPV4 |
-                                              * PKT_TX_TCP_CKSUM
+                                       0x02, /* RTE_MBUF_F_TX_IPV4 */
+                                       0x12, /* RTE_MBUF_F_TX_IPV4 |
+                                              * RTE_MBUF_F_TX_TCP_CKSUM
                                               */
-                                       0x22, /* PKT_TX_IPV4 |
-                                              * PKT_TX_SCTP_CKSUM
+                                       0x22, /* RTE_MBUF_F_TX_IPV4 |
+                                              * RTE_MBUF_F_TX_SCTP_CKSUM
                                               */
-                                       0x32, /* PKT_TX_IPV4 |
-                                              * PKT_TX_UDP_CKSUM
+                                       0x32, /* RTE_MBUF_F_TX_IPV4 |
+                                              * RTE_MBUF_F_TX_UDP_CKSUM
                                               */
-                                       0x03, /* PKT_TX_IPV4 |
-                                              * PKT_TX_IP_CKSUM
+                                       0x03, /* RTE_MBUF_F_TX_IPV4 |
+                                              * RTE_MBUF_F_TX_IP_CKSUM
                                               */
-                                       0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
-                                              * PKT_TX_TCP_CKSUM
+                                       0x13, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+                                              * RTE_MBUF_F_TX_TCP_CKSUM
                                               */
-                                       0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
-                                              * PKT_TX_SCTP_CKSUM
+                                       0x23, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+                                              * RTE_MBUF_F_TX_SCTP_CKSUM
                                               */
-                                       0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
-                                              * PKT_TX_UDP_CKSUM
+                                       0x33, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+                                              * RTE_MBUF_F_TX_UDP_CKSUM
                                               */
                                },
 
@@ -1062,7 +1070,7 @@ NIX_TX_FASTPATH_MODES
        else
                pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
 
-       if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+       if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
                pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
 
        rte_mb();
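
DEV_TX_OFFLOAD_MULTI_SEGS is the pre-rename spelling of RTE_ETH_TX_OFFLOAD_MULTI_SEGS; the logic is unchanged, and the multi-segment burst function is still installed only when the application enabled that Tx offload. For context, an application-side sketch of requesting the offload at configure time (port_id and queue counts are placeholders, error handling trimmed):

#include <rte_ethdev.h>

/* Enable scatter-gather Tx on a port if the PMD reports support, so a
 * driver like the one above selects its multi-segment burst routine.
 */
static int
configure_multiseg_tx(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
        struct rte_eth_dev_info info;
        struct rte_eth_conf conf = {0};
        int ret;

        ret = rte_eth_dev_info_get(port_id, &info);
        if (ret != 0)
                return ret;

        if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
                conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

        return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}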