net/cnxk: fix RSS RETA table update
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 9dfea99..e57847e 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -2,7 +2,7 @@
  * Copyright(C) 2021 Marvell.
  */
 #include "cn10k_ethdev.h"
-#include "cn10k_rte_flow.h"
+#include "cn10k_flow.h"
 #include "cn10k_rx.h"
 #include "cn10k_tx.h"
 
@@ -15,30 +15,33 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
        struct rte_eth_rxmode *rxmode = &conf->rxmode;
        uint16_t flags = 0;
 
-       if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-           (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+       if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+           (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
                flags |= NIX_RX_OFFLOAD_RSS_F;
 
        if (dev->rx_offloads &
-           (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+           (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
                flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
        if (dev->rx_offloads &
-           (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+           (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
                flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-       if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+       if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
                flags |= NIX_RX_MULTI_SEG_F;
 
-       if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+       if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
                flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
        if (!dev->ptype_disable)
                flags |= NIX_RX_OFFLOAD_PTYPE_F;
 
-       if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+       if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
                flags |= NIX_RX_OFFLOAD_SECURITY_F;
 
+       if (dev->rx_mark_update)
+               flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F;
+
        return flags;
 }
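
Most of the churn in this hunk is the DPDK 21.11 rename of the ethdev macros (ETH_MQ_RX_RSS -> RTE_ETH_MQ_RX_RSS, DEV_RX_OFFLOAD_* -> RTE_ETH_RX_OFFLOAD_*); only the rx_mark_update branch is new behavior. A minimal, hypothetical application-side sketch that would exercise the RSS and checksum branches above (port 0 and the queue counts are illustrative only):

    #include <rte_ethdev.h>

    static int setup_port0_rss(void)
    {
        struct rte_eth_conf conf = {0};

        conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
        /* RTE_ETH_RX_OFFLOAD_CHECKSUM bundles IPV4/UDP/TCP cksum */
        conf.rxmode.offloads = RTE_ETH_RX_OFFLOAD_RSS_HASH |
                               RTE_ETH_RX_OFFLOAD_CHECKSUM;
        /* nix_rx_offload_flags() then yields NIX_RX_OFFLOAD_RSS_F |
         * NIX_RX_OFFLOAD_CHECKSUM_F (plus PTYPE unless disabled).
         */
        return rte_eth_dev_configure(0, 1, 1, &conf);
    }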
 
@@ -50,15 +53,15 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
        uint16_t flags = 0;
 
        /* Fastpath is dependent on these enums */
-       RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
-       RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
-       RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
-       RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
-       RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
-       RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
-       RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
-       RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
-       RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_TCP_CKSUM != (1ULL << 52));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_SCTP_CKSUM != (2ULL << 52));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_UDP_CKSUM != (3ULL << 52));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IP_CKSUM != (1ULL << 54));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IPV4 != (1ULL << 55));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IP_CKSUM != (1ULL << 58));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV4 != (1ULL << 59));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV6 != (1ULL << 60));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_UDP_CKSUM != (1ULL << 41));
        RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
        RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
        RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
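
These build-time assertions exist because the cn10k Tx fastpath lifts the checksum-request fields out of ol_flags with fixed shifts instead of testing individual flags; if the mbuf layout ever moved these bits, the driver would silently build wrong descriptors. A hedged sketch of the pattern the assertions protect (not the driver's actual fastpath code):

    #include <rte_mbuf.h>

    static inline void sketch_l3_l4_from_olflags(const struct rte_mbuf *m)
    {
        /* Bits 52-53: L4 checksum type (1=TCP, 2=SCTP, 3=UDP),
         * per the RTE_BUILD_BUG_ON()s above.
         */
        uint64_t l4_type = (m->ol_flags >> 52) & 0x3;
        /* Bit 54: RTE_MBUF_F_TX_IP_CKSUM. */
        uint64_t l3_csum = (m->ol_flags >> 54) & 0x1;

        RTE_SET_USED(l4_type);
        RTE_SET_USED(l3_csum);
    }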
@@ -72,39 +75,39 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
                         offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-       if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-           conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+       if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+           conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
                flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-       if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-           conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+       if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+           conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
                flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-       if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-           conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-           conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+       if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+           conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+           conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
                flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-       if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+       if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
                flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-       if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+       if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
                flags |= NIX_TX_MULTI_SEG_F;
 
        /* Enable Inner checksum for TSO */
-       if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+       if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
                flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
        /* Enable Inner and Outer checksum for Tunnel TSO */
-       if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-                   DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
+       if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+                   RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
                flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
                          NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-       if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+       if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
                flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
-       if (conf & DEV_TX_OFFLOAD_SECURITY)
+       if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
                flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
        return flags;
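
Note the implication chain here: plain TCP TSO also pulls in inner L3/L4 checksum, tunnel TSO additionally pulls in the outer checksum flags, and NIX_TX_OFFLOAD_MBUF_NOFF_F is set whenever fast free is *not* requested. For instance, a hypothetical configuration of:

    struct rte_eth_conf conf = {0};

    conf.txmode.offloads = RTE_ETH_TX_OFFLOAD_TCP_TSO |
                           RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

would come back from nix_tx_offload_flags() as NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F | NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_MBUF_NOFF_F.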
@@ -131,53 +134,31 @@ static void
 nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn10k_eth_txq *txq,
                      uint16_t qid)
 {
-       struct nix_send_ext_s *send_hdr_ext;
        union nix_send_hdr_w0_u send_hdr_w0;
-       struct nix_send_mem_s *send_mem;
-       union nix_send_sg_s sg_w0;
-
-       RTE_SET_USED(dev);
 
        /* Initialize the fields based on basic single segment packet */
-       memset(&txq->cmd, 0, sizeof(txq->cmd));
        send_hdr_w0.u = 0;
-       sg_w0.u = 0;
-
        if (dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
                /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
                send_hdr_w0.sizem1 = 2;
-
-               send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[0];
-               send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
                if (dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
                        /* Default: one seg packet would have:
                         * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
                         * => 8/2 - 1 = 3
                         */
                        send_hdr_w0.sizem1 = 3;
-                       send_hdr_ext->w0.tstmp = 1;
 
                        /* To calculate the offset for send_mem,
                         * send_hdr->w0.sizem1 * 2
                         */
-                       send_mem = (struct nix_send_mem_s *)(txq->cmd + 2);
-                       send_mem->w0.subdc = NIX_SUBDC_MEM;
-                       send_mem->w0.alg = NIX_SENDMEMALG_SETTSTMP;
-                       send_mem->addr = dev->tstamp.tx_tstamp_iova;
+                       txq->ts_mem = dev->tstamp.tx_tstamp_iova;
                }
        } else {
                /* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
                send_hdr_w0.sizem1 = 1;
        }
-
        send_hdr_w0.sq = qid;
-       sg_w0.subdc = NIX_SUBDC_SG;
-       sg_w0.segs = 1;
-       sg_w0.ld_type = NIX_SENDLDTYPE_LDD;
-
        txq->send_hdr_w0 = send_hdr_w0.u;
-       txq->sg_w0 = sg_w0.u;
-
        rte_wmb();
 }
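
The sizem1 values in the comments above encode the SQE command length in units of two 8-byte words, minus one; the patch keeps that arithmetic but drops the per-queue command template (the EXT/SG/MEM subdescriptors are now built in the fastpath, with only send_hdr_w0 and the timestamp address cached on the queue). A hedged helper expressing the same rule:

    /* dwords = command length in 8B words, e.g. HDR(2) + EXT(2) +
     * SG(1) + IOVA(1) + MEM(2) = 8 dwords -> sizem1 = 3.
     */
    static inline uint8_t nix_cmd_sizem1(uint8_t dwords)
    {
        return (dwords / 2) - 1;
    }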
 
@@ -313,6 +294,12 @@ cn10k_nix_configure(struct rte_eth_dev *eth_dev)
        if (rc)
                return rc;
 
+       if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
+           dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+               /* Register callback to handle security error work */
+               roc_nix_inl_cb_register(cn10k_eth_sec_sso_work_cb, NULL);
+       }
+
        /* Update offload flags */
        dev->rx_offload_flags = nix_rx_offload_flags(eth_dev);
        dev->tx_offload_flags = nix_tx_offload_flags(eth_dev);
@@ -441,6 +428,27 @@ cn10k_nix_timesync_disable(struct rte_eth_dev *eth_dev)
        return 0;
 }
 
+static int
+cn10k_nix_timesync_read_tx_timestamp(struct rte_eth_dev *eth_dev,
+                                    struct timespec *timestamp)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct cnxk_timesync_info *tstamp = &dev->tstamp;
+       uint64_t ns;
+
+       if (*tstamp->tx_tstamp == 0)
+               return -EINVAL;
+
+       *tstamp->tx_tstamp = ((*tstamp->tx_tstamp >> 32) * NSEC_PER_SEC) +
+               (*tstamp->tx_tstamp & 0xFFFFFFFFUL);
+       ns = rte_timecounter_update(&dev->tx_tstamp_tc, *tstamp->tx_tstamp);
+       *timestamp = rte_ns_to_timespec(ns);
+       *tstamp->tx_tstamp = 0;
+       rte_wmb();
+
+       return 0;
+}
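
The hardware latches the Tx timestamp with seconds in the upper 32 bits and nanoseconds in the lower 32, so the function flattens it to a plain nanosecond count before the timecounter update, then zeroes the slot to mark it consumed. A worked example of the conversion (NSEC_PER_SEC being 1e9):

    uint64_t raw = (5ULL << 32) | 500; /* 5 s + 500 ns */
    uint64_t ns  = ((raw >> 32) * NSEC_PER_SEC) +
                   (raw & 0xFFFFFFFFUL); /* == 5000000500 */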
+
 static int
 cn10k_nix_dev_start(struct rte_eth_dev *eth_dev)
 {
@@ -470,6 +478,27 @@ cn10k_nix_dev_start(struct rte_eth_dev *eth_dev)
        return 0;
 }
 
+static int
+cn10k_nix_rx_metadata_negotiate(struct rte_eth_dev *eth_dev, uint64_t *features)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+       *features &=
+               (RTE_ETH_RX_METADATA_USER_FLAG | RTE_ETH_RX_METADATA_USER_MARK);
+
+       if (*features) {
+               dev->rx_offload_flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F;
+               dev->rx_mark_update = true;
+       } else {
+               dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
+               dev->rx_mark_update = false;
+       }
+
+       cn10k_eth_set_rx_function(eth_dev);
+
+       return 0;
+}
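
This implements the Rx metadata negotiation API added in DPDK 21.11: the application calls rte_eth_rx_metadata_negotiate() before rte_eth_dev_configure(), the PMD masks the request down to what it supports, and here the mark-update fastpath is compiled in only when some feature survives. A hypothetical application-side call (port 0 illustrative):

    uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
                        RTE_ETH_RX_METADATA_USER_MARK |
                        RTE_ETH_RX_METADATA_TUNNEL_ID;
    int rc = rte_eth_rx_metadata_negotiate(0, &features);
    /* On cn10k, TUNNEL_ID is cleared and FLAG|MARK remain, so the
     * NIX_RX_OFFLOAD_MARK_UPDATE_F path is enabled only when used.
     */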
+
 /* Update platform specific eth dev ops */
 static void
 nix_eth_dev_ops_override(void)
@@ -489,6 +518,10 @@ nix_eth_dev_ops_override(void)
        cnxk_eth_dev_ops.dev_ptypes_set = cn10k_nix_ptypes_set;
        cnxk_eth_dev_ops.timesync_enable = cn10k_nix_timesync_enable;
        cnxk_eth_dev_ops.timesync_disable = cn10k_nix_timesync_disable;
+       cnxk_eth_dev_ops.rx_metadata_negotiate =
+               cn10k_nix_rx_metadata_negotiate;
+       cnxk_eth_dev_ops.timesync_read_tx_timestamp =
+               cn10k_nix_timesync_read_tx_timestamp;
 }
 
 static void
@@ -553,10 +586,15 @@ cn10k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 
        dev = cnxk_eth_pmd_priv(eth_dev);
 
-       /* DROP_RE is not supported with inline IPSec for CN10K A0 */
-       if (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
-           roc_model_is_cnf10kb_a0())
+       /* DROP_RE is not supported with inline IPSec for CN10K A0 and
+        * when vector mode is enabled.
+        */
+       if ((roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
+            roc_model_is_cnf10kb_a0()) &&
+           !roc_env_is_asim()) {
                dev->ipsecd_drop_re_dis = 1;
+               dev->vec_drop_re_dis = 1;
+       }
 
        /* Register up msg callbacks for PTP information */
        roc_nix_ptp_info_cb_register(&dev->nix, cn10k_nix_ptp_info_update_cb);
@@ -567,10 +605,13 @@ cn10k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 static const struct rte_pci_id cn10k_pci_nix_map[] = {
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_PF),
+       CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_VF),
+       CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_AF_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_AF_VF),
+       CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_AF_VF),
        {
                .vendor_id = 0,
        },
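
The three new rows extend cn10k probing to the CNF10KA subsystem IDs for the PF, VF and AF-VF personalities. For reference, a sketch of what CNXK_PCI_ID expands to — this mirrors the helper in cnxk_ethdev.h, but treat the exact field list as an assumption:

    /* Assumed shape of the helper (see cnxk_ethdev.h): one table row
     * matches one (device id, subsystem device id) silicon variant.
     */
    #define CNXK_PCI_ID(subsys_dev, dev)                        \
        {                                                       \
            .class_id = RTE_CLASS_ANY_ID,                       \
            .vendor_id = PCI_VENDOR_ID_CAVIUM,                  \
            .device_id = (dev),                                 \
            .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,        \
            .subsystem_device_id = (subsys_dev),                \
        }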