diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 3e25551..deb95ae 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -3,6 +3,8 @@
  */
 #include <cnxk_ethdev.h>
 
+#include <rte_eventdev.h>
+
 static inline uint64_t
 nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
 {
@@ -10,7 +12,7 @@ nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
 
        if (roc_nix_is_vf_or_sdp(&dev->nix) ||
            dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
-               capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+               capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
        return capa;
 }
@@ -28,11 +30,11 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
        uint32_t speed_capa;
 
        /* Auto negotiation disabled */
-       speed_capa = ETH_LINK_SPEED_FIXED;
+       speed_capa = RTE_ETH_LINK_SPEED_FIXED;
        if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
-               speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
-                             ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
-                             ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+               speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+                             RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
+                             RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
        }
 
        return speed_capa;
@@ -65,7 +67,7 @@ nix_security_setup(struct cnxk_eth_dev *dev)
        struct roc_nix *nix = &dev->nix;
        int i, rc = 0;
 
-       if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+       if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
                /* Setup Inline Inbound */
                rc = roc_nix_inl_inb_init(nix);
                if (rc) {
@@ -78,10 +80,21 @@ nix_security_setup(struct cnxk_eth_dev *dev)
                 * Will be overridden when event mode rq's are setup.
                 */
                cnxk_nix_inb_mode_set(dev, true);
+
+               /* Allocate memory to be used as dptr for CPT ucode
+                * WRITE_SA op.
+                */
+               dev->inb.sa_dptr =
+                       plt_zmalloc(ROC_NIX_INL_OT_IPSEC_INB_HW_SZ, 0);
+               if (!dev->inb.sa_dptr) {
+                       plt_err("Couldn't allocate memory for SA dptr");
+                       rc = -ENOMEM;
+                       goto cleanup;
+               }
        }
 
-       if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
-           dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+       if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
+           dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
                struct plt_bitmap *bmap;
                size_t bmap_sz;
                void *mem;
@@ -95,14 +108,25 @@ nix_security_setup(struct cnxk_eth_dev *dev)
                if (rc) {
                        plt_err("Failed to initialize nix inline outb, rc=%d",
                                rc);
-                       goto cleanup;
+                       goto sa_dptr_free;
                }
 
                dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
 
                /* Skip the rest if DEV_TX_OFFLOAD_SECURITY is not enabled */
-               if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY))
-                       goto done;
+               if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY))
+                       return 0;
+
+               /* Allocate memory to be used as dptr for CPT ucode
+                * WRITE_SA op.
+                */
+               dev->outb.sa_dptr =
+                       plt_zmalloc(ROC_NIX_INL_OT_IPSEC_OUTB_HW_SZ, 0);
+               if (!dev->outb.sa_dptr) {
+                       plt_err("Couldn't allocate memory for SA dptr");
+                       rc = -ENOMEM;
+                       goto sa_dptr_free;
+               }
 
                rc = -ENOMEM;
                /* Allocate a bitmap to alloc and free sa indexes */
@@ -112,7 +136,7 @@ nix_security_setup(struct cnxk_eth_dev *dev)
                        plt_err("Outbound SA bmap alloc failed");
 
                        rc |= roc_nix_inl_outb_fini(nix);
-                       goto cleanup;
+                       goto sa_dptr_free;
                }
 
                rc = -EIO;
@@ -122,7 +146,7 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 
                        rc |= roc_nix_inl_outb_fini(nix);
                        plt_free(mem);
-                       goto cleanup;
+                       goto sa_dptr_free;
                }
 
                for (i = 0; i < dev->outb.max_sa; i++)
@@ -132,15 +156,51 @@ nix_security_setup(struct cnxk_eth_dev *dev)
                dev->outb.sa_bmap_mem = mem;
                dev->outb.sa_bmap = bmap;
        }
-
-done:
        return 0;
+
+sa_dptr_free:
+       if (dev->inb.sa_dptr)
+               plt_free(dev->inb.sa_dptr);
+       if (dev->outb.sa_dptr)
+               plt_free(dev->outb.sa_dptr);
 cleanup:
-       if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+       if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
                rc |= roc_nix_inl_inb_fini(nix);
        return rc;
 }
 
+static int
+nix_meter_fini(struct cnxk_eth_dev *dev)
+{
+       struct cnxk_meter_node *next_mtr = NULL;
+       struct roc_nix_bpf_objs profs = {0};
+       struct cnxk_meter_node *mtr = NULL;
+       struct cnxk_mtr *fms = &dev->mtr;
+       struct roc_nix *nix = &dev->nix;
+       struct roc_nix_rq *rq;
+       uint32_t i;
+       int rc = 0;
+
+       RTE_TAILQ_FOREACH_SAFE(mtr, fms, next, next_mtr) {
+               for (i = 0; i < mtr->rq_num; i++) {
+                       rq = &dev->rqs[mtr->rq_id[i]];
+                       rc |= roc_nix_bpf_ena_dis(nix, mtr->bpf_id, rq, false);
+               }
+
+               profs.level = mtr->level;
+               profs.count = 1;
+               profs.ids[0] = mtr->bpf_id;
+               rc = roc_nix_bpf_free(nix, &profs, 1);
+
+               if (rc)
+                       return rc;
+
+               TAILQ_REMOVE(fms, mtr, next);
+               plt_free(mtr);
+       }
+       return 0;
+}
+
 static int
 nix_security_release(struct cnxk_eth_dev *dev)
 {
@@ -150,7 +210,7 @@ nix_security_release(struct cnxk_eth_dev *dev)
        int rc, ret = 0;
 
        /* Cleanup Inline inbound */
-       if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+       if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
                /* Destroy inbound sessions */
                tvar = NULL;
                RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
@@ -164,11 +224,16 @@ nix_security_release(struct cnxk_eth_dev *dev)
                if (rc)
                        plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
                ret |= rc;
+
+               if (dev->inb.sa_dptr) {
+                       plt_free(dev->inb.sa_dptr);
+                       dev->inb.sa_dptr = NULL;
+               }
        }
 
        /* Cleanup Inline outbound */
-       if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
-           dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+       if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
+           dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
                /* Destroy outbound sessions */
                tvar = NULL;
                RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
@@ -184,6 +249,10 @@ nix_security_release(struct cnxk_eth_dev *dev)
                plt_free(dev->outb.sa_bmap_mem);
                dev->outb.sa_bmap = NULL;
                dev->outb.sa_bmap_mem = NULL;
+               if (dev->outb.sa_dptr) {
+                       plt_free(dev->outb.sa_dptr);
+                       dev->outb.sa_dptr = NULL;
+               }
        }
 
        dev->inb.inl_dev = false;
@@ -210,8 +279,8 @@ nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
        buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
 
        if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
-               dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
-               dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+               dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+               dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
        }
 }
 
@@ -237,24 +306,22 @@ static int
 nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 {
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       enum roc_nix_fc_mode fc_mode = ROC_NIX_FC_FULL;
        struct cnxk_fc_cfg *fc = &dev->fc_cfg;
-       struct rte_eth_fc_conf fc_conf = {0};
        int rc;
 
-       /* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
-        * by AF driver, update those info in PMD structure.
-        */
-       rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
-       if (rc)
-               goto exit;
+       /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
+       if (roc_model_is_cn96_ax() &&
+           dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG)
+               fc_mode = ROC_NIX_FC_TX;
 
-       fc->mode = fc_conf.mode;
-       fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-                       (fc_conf.mode == RTE_FC_RX_PAUSE);
-       fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
-                       (fc_conf.mode == RTE_FC_TX_PAUSE);
+       /* By default enable flow control */
+       rc = roc_nix_fc_mode_set(&dev->nix, fc_mode);
+       if (rc)
+               return rc;
 
-exit:
+       fc->mode = (fc_mode == ROC_NIX_FC_FULL) ? RTE_ETH_FC_FULL :
+                                                 RTE_ETH_FC_TX_PAUSE;
        return rc;
 }
 
@@ -265,7 +332,7 @@ nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
        struct cnxk_fc_cfg *fc = &dev->fc_cfg;
        struct rte_eth_fc_conf fc_cfg = {0};
 
-       if (roc_nix_is_vf_or_sdp(&dev->nix))
+       if (roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix))
                return 0;
 
        fc_cfg.mode = fc->mode;
@@ -273,11 +340,11 @@ nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
        /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
        if (roc_model_is_cn96_ax() &&
            dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
-           (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
+           (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
                fc_cfg.mode =
-                               (fc_cfg.mode == RTE_FC_FULL ||
-                               fc_cfg.mode == RTE_FC_TX_PAUSE) ?
-                               RTE_FC_TX_PAUSE : RTE_FC_NONE;
+                               (fc_cfg.mode == RTE_ETH_FC_FULL ||
+                               fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
+                               RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
        }
 
        return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
@@ -320,7 +387,7 @@ nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
         * Maximum three segments can be supported with W8, Choose
         * NIX_MAXSQESZ_W16 for multi segment offload.
         */
-       if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+       if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
                return NIX_MAXSQESZ_W16;
        else
                return NIX_MAXSQESZ_W8;
@@ -348,7 +415,7 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
        /* When Tx Security offload is enabled, increase tx desc count by
         * max possible outbound desc count.
         */
-       if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+       if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
                nb_desc += dev->outb.nb_desc;
 
        /* Setup ROC SQ */
@@ -467,7 +534,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
         * to avoid meta packet drop as LBK does not currently support
         * backpressure.
         */
-       if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
+       if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
                uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
 
                /* Use current RQ's aura limit if inl rq is not available */
@@ -476,6 +543,11 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
        }
 
+       /* It's a no-op when inline device is not used */
+       if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY ||
+           dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+               roc_nix_inl_dev_xaq_realloc(mp->pool_id);
+
        /* Setup ROC CQ */
        cq = &dev->cqs[qid];
        cq->qid = qid;
@@ -500,6 +572,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
        rq->first_skip = first_skip;
        rq->later_skip = sizeof(struct rte_mbuf);
        rq->lpb_size = mp->elt_size;
+       rq->lpb_drop_ena = !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY);
 
        /* Enable Inline IPSec on RQ, will not be used for Poll mode */
        if (roc_nix_inl_inb_is_enabled(nix))
@@ -529,7 +602,14 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
        rxq_sp->qconf.nb_desc = nb_desc;
        rxq_sp->qconf.mp = mp;
 
-       if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+       if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+               /* Pass a tagmask used to handle error packets in inline device.
+                * Ethdev rq's tag_mask field will be overwritten later
+                * when sso is setup.
+                */
+               rq->tag_mask =
+                       0x0FF00000 | ((uint32_t)RTE_EVENT_TYPE_ETHDEV << 28);
+
                /* Setup rq reference for inline dev if present */
                rc = roc_nix_inl_dev_rq_get(rq);
                if (rc)
@@ -547,7 +627,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
         * These are needed in deriving raw clock value from tsc counter.
         * read_clock eth op returns raw clock value.
         */
-       if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
+       if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
                rc = cnxk_nix_tsc_convert(dev);
                if (rc) {
                        plt_err("Failed to calculate delta and freq mult");
@@ -586,7 +666,7 @@ cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
        plt_nix_dbg("Releasing rxq %u", qid);
 
        /* Release rq reference for inline dev if present */
-       if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+       if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
                roc_nix_inl_dev_rq_put(rq);
 
        /* Cleanup ROC RQ */
@@ -625,24 +705,24 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
 
        dev->ethdev_rss_hf = ethdev_rss;
 
-       if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+       if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
            dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
                flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
        }
 
-       if (ethdev_rss & ETH_RSS_C_VLAN)
+       if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
                flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
 
-       if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+       if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
                flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
 
-       if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+       if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
                flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
 
-       if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+       if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
                flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
 
-       if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+       if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
                flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
 
        if (ethdev_rss & RSS_IPV4_ENABLE)
@@ -651,34 +731,34 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
        if (ethdev_rss & RSS_IPV6_ENABLE)
                flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
 
-       if (ethdev_rss & ETH_RSS_TCP)
+       if (ethdev_rss & RTE_ETH_RSS_TCP)
                flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
 
-       if (ethdev_rss & ETH_RSS_UDP)
+       if (ethdev_rss & RTE_ETH_RSS_UDP)
                flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
 
-       if (ethdev_rss & ETH_RSS_SCTP)
+       if (ethdev_rss & RTE_ETH_RSS_SCTP)
                flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
 
-       if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+       if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
                flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
 
        if (ethdev_rss & RSS_IPV6_EX_ENABLE)
                flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
 
-       if (ethdev_rss & ETH_RSS_PORT)
+       if (ethdev_rss & RTE_ETH_RSS_PORT)
                flowkey_cfg |= FLOW_KEY_TYPE_PORT;
 
-       if (ethdev_rss & ETH_RSS_NVGRE)
+       if (ethdev_rss & RTE_ETH_RSS_NVGRE)
                flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
 
-       if (ethdev_rss & ETH_RSS_VXLAN)
+       if (ethdev_rss & RTE_ETH_RSS_VXLAN)
                flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
 
-       if (ethdev_rss & ETH_RSS_GENEVE)
+       if (ethdev_rss & RTE_ETH_RSS_GENEVE)
                flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
 
-       if (ethdev_rss & ETH_RSS_GTPU)
+       if (ethdev_rss & RTE_ETH_RSS_GTPU)
                flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
 
        return flowkey_cfg;
@@ -698,10 +778,17 @@ nix_free_queue_mem(struct cnxk_eth_dev *dev)
 static int
 nix_ingress_policer_setup(struct cnxk_eth_dev *dev)
 {
+       struct rte_eth_dev *eth_dev = dev->eth_dev;
+       int rc = 0;
+
        TAILQ_INIT(&dev->mtr_profiles);
        TAILQ_INIT(&dev->mtr_policy);
+       TAILQ_INIT(&dev->mtr);
 
-       return 0;
+       if (eth_dev->dev_ops->mtr_ops_get == NULL)
+               return rc;
+
+       return nix_mtr_capabilities_init(eth_dev);
 }
 
 static int
@@ -713,7 +800,7 @@ nix_rss_default_setup(struct cnxk_eth_dev *dev)
        uint64_t rss_hf;
 
        rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
-       rss_hash_level = ETH_RSS_LEVEL(rss_hf);
+       rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
        if (rss_hash_level)
                rss_hash_level -= 1;
 
@@ -860,24 +947,12 @@ tx_queue_release:
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                dev_ops->tx_queue_release(eth_dev, i);
 fail:
-       if (tx_qconf)
-               free(tx_qconf);
-       if (rx_qconf)
-               free(rx_qconf);
+       free(tx_qconf);
+       free(rx_qconf);
 
        return rc;
 }
 
-static uint16_t
-nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
-{
-       RTE_SET_USED(queue);
-       RTE_SET_USED(mbufs);
-       RTE_SET_USED(pkts);
-
-       return 0;
-}
-
 static void
 nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
 {
@@ -888,8 +963,8 @@ nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
         * which caused app crash since rx/tx burst is still
         * on different lcores
         */
-       eth_dev->tx_pkt_burst = nix_eth_nop_burst;
-       eth_dev->rx_pkt_burst = nix_eth_nop_burst;
+       eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
+       eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
        rte_mb();
 }
 
@@ -925,8 +1000,8 @@ nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
 
        /* Nothing much to do if offload is not enabled */
        if (!(dev->tx_offloads &
-             (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-              DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
+             (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+              RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
                return 0;
 
        /* Setup LSO formats in AF. Its a no-op if other ethdev has
@@ -974,13 +1049,13 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
                goto fail_configure;
        }
 
-       if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-           rxmode->mq_mode != ETH_MQ_RX_RSS) {
+       if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+           rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
                plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
                goto fail_configure;
        }
 
-       if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+       if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
                plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
                goto fail_configure;
        }
@@ -1001,6 +1076,11 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
                if (rc)
                        goto fail_configure;
 
+               /* Disable and free rte_meter entries */
+               rc = nix_meter_fini(dev);
+               if (rc)
+                       goto fail_configure;
+
                /* Cleanup security support */
                rc = nix_security_release(dev);
                if (rc)
@@ -1016,7 +1096,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
        /* Prepare rx cfg */
        rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
        if (dev->rx_offloads &
-           (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+           (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
                rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
                rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
        }
@@ -1024,7 +1104,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
                   ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
                   ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
 
-       if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+       if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
                rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
                /* Disable drop re if rx offload security is enabled and
                 * platform does not support it.
@@ -1085,7 +1165,10 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
                goto free_nix_lf;
        }
 
-       rc = roc_nix_switch_hdr_set(nix, dev->npc.switch_header_type);
+       rc = roc_nix_switch_hdr_set(nix, dev->npc.switch_header_type,
+                                   dev->npc.pre_l2_size_offset,
+                                   dev->npc.pre_l2_size_offset_mask,
+                                   dev->npc.pre_l2_size_shift_dir);
        if (rc) {
                plt_err("Failed to enable switch type nix_lf rc=%d", rc);
                goto free_nix_lf;
@@ -1160,8 +1243,13 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
                goto cq_fini;
        }
 
+       /* Setup Inline security support */
+       rc = nix_security_setup(dev);
+       if (rc)
+               goto cq_fini;
+
        /* Init flow control configuration */
-       fc_cfg.cq_cfg_valid = false;
+       fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG;
        fc_cfg.rxchan_cfg.enable = true;
        rc = roc_nix_fc_config_set(nix, &fc_cfg);
        if (rc) {
@@ -1176,11 +1264,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
                goto cq_fini;
        }
 
-       /* Setup Inline security support */
-       rc = nix_security_setup(dev);
-       if (rc)
-               goto cq_fini;
-
+       /* Initialize TC to SQ mapping as invalid */
+       memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
        /*
         * Restore queue config when reconfigure followed by
         * reconfigure and no queue configure invoked from application case.
@@ -1325,8 +1410,10 @@ cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
        int count, i, j, rc;
        void *rxq;
 
-       /* Disable switch hdr pkind */
-       roc_nix_switch_hdr_set(&dev->nix, 0);
+       /* Disable all the NPC entries */
+       rc = roc_npc_mcam_enable_all_entries(&dev->npc, 0);
+       if (rc)
+               return rc;
 
        /* Stop link change events */
        if (!roc_nix_is_vf_or_sdp(&dev->nix))
@@ -1335,6 +1422,8 @@ cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
        /* Disable Rx via NPC */
        roc_nix_npc_rx_ena_dis(&dev->nix, false);
 
+       roc_nix_inl_outb_soft_exp_poll_switch(&dev->nix, false);
+
        /* Stop rx queues and free up pkts pending */
        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                rc = dev_ops->rx_queue_stop(eth_dev, i);
@@ -1401,6 +1490,12 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
                return rc;
        }
 
+       rc = roc_npc_mcam_enable_all_entries(&dev->npc, 1);
+       if (rc) {
+               plt_err("Failed to enable NPC entries %d", rc);
+               return rc;
+       }
+
        cnxk_nix_toggle_flag_link_cfg(dev, true);
 
        /* Start link change events */
@@ -1416,12 +1511,12 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
         * enabled on PF owning this VF
         */
        memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
-       if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
+       if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
                cnxk_eth_dev_ops.timesync_enable(eth_dev);
        else
                cnxk_eth_dev_ops.timesync_disable(eth_dev);
 
-       if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+       if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
                rc = rte_mbuf_dyn_rx_timestamp_register
                        (&dev->tstamp.tstamp_dynfield_offset,
                         &dev->tstamp.rx_tstamp_dynflag);
@@ -1433,6 +1528,8 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
 
        cnxk_nix_toggle_flag_link_cfg(dev, false);
 
+       roc_nix_inl_outb_soft_exp_poll_switch(&dev->nix, true);
+
        return 0;
 
 rx_disable:
@@ -1469,6 +1566,10 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
        .tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
        .flow_ctrl_get = cnxk_nix_flow_ctrl_get,
        .flow_ctrl_set = cnxk_nix_flow_ctrl_set,
+       .priority_flow_ctrl_queue_config =
+                               cnxk_nix_priority_flow_ctrl_queue_config,
+       .priority_flow_ctrl_queue_info_get =
+                               cnxk_nix_priority_flow_ctrl_queue_info_get,
        .dev_set_link_up = cnxk_nix_set_link_up,
        .dev_set_link_down = cnxk_nix_set_link_down,
        .get_module_info = cnxk_nix_get_module_info,
@@ -1516,6 +1617,9 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
        int rc, max_entries;
 
        eth_dev->dev_ops = &cnxk_eth_dev_ops;
+       eth_dev->rx_queue_count = cnxk_nix_rx_queue_count;
+       eth_dev->rx_descriptor_status = cnxk_nix_rx_descriptor_status;
+       eth_dev->tx_descriptor_status = cnxk_nix_tx_descriptor_status;
 
        /* Alloc security context */
        sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0);
@@ -1526,8 +1630,6 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
        sec_ctx->flags =
                (RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA);
        eth_dev->security_ctx = sec_ctx;
-       TAILQ_INIT(&dev->inb.list);
-       TAILQ_INIT(&dev->outb.list);
 
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -1563,6 +1665,11 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
        dev->configured = 0;
        dev->ptype_disable = 0;
 
+       TAILQ_INIT(&dev->inb.list);
+       TAILQ_INIT(&dev->outb.list);
+       rte_spinlock_init(&dev->inb.lock);
+       rte_spinlock_init(&dev->outb.lock);
+
        /* For vfs, returned max_entries will be 0. but to keep default mac
         * address, one entry must be allocated. so setting up to 1.
         */
@@ -1642,9 +1749,14 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 {
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+       struct rte_eth_pfc_queue_conf pfc_conf;
        struct roc_nix *nix = &dev->nix;
+       struct rte_eth_fc_conf fc_conf;
        int rc, i;
 
+       /* Disable switch hdr pkind */
+       roc_nix_switch_hdr_set(&dev->nix, 0, 0, 0, 0);
+
        plt_free(eth_dev->security_ctx);
        eth_dev->security_ctx = NULL;
 
@@ -1657,6 +1769,33 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 
        roc_nix_npc_rx_ena_dis(nix, false);
 
+       /* Restore 802.3 Flow control configuration */
+       memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_queue_conf));
+       memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
+       fc_conf.mode = RTE_ETH_FC_NONE;
+       rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
+       pfc_conf.mode = RTE_ETH_FC_NONE;
+       for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
+               if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
+                       pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
+                       pfc_conf.rx_pause.tc = i;
+                       pfc_conf.tx_pause.rx_qid = i;
+                       pfc_conf.tx_pause.tc = i;
+                       rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
+                               &pfc_conf);
+                       if (rc)
+                               plt_err("Failed to reset PFC. error code(%d)",
+                                       rc);
+               }
+       }
+
+       fc_conf.mode = RTE_ETH_FC_FULL;
+       rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
+       /* Disable and free rte_meter entries */
+       nix_meter_fini(dev);
+
        /* Disable and free rte_flow entries */
        roc_npc_fini(&dev->npc);