net/cnxk: support meter ops get
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 2775fe4..d2ee39d 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -8,7 +8,8 @@ nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
 {
        uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;
 
-       if (roc_nix_is_vf_or_sdp(&dev->nix))
+       if (roc_nix_is_vf_or_sdp(&dev->nix) ||
+           dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
                capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
 
        return capa;
@@ -37,6 +38,251 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
        return speed_capa;
 }
 
+int
+cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
+{
+       struct roc_nix *nix = &dev->nix;
+
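+       /* Nothing to do if the requested mode is already active */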
+       if (dev->inb.inl_dev == use_inl_dev)
+               return 0;
+
+       plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!",
+                   dev->inb.nb_sess, !!dev->inb.inl_dev);
+
+       /* Change the mode */
+       dev->inb.inl_dev = use_inl_dev;
+
+       /* Update RoC for NPC rule insertion */
+       roc_nix_inb_mode_set(nix, use_inl_dev);
+
+       /* Setup lookup mem */
+       return cnxk_nix_lookup_mem_sa_base_set(dev);
+}
+
+static int
+nix_security_setup(struct cnxk_eth_dev *dev)
+{
+       struct roc_nix *nix = &dev->nix;
+       int i, rc = 0;
+
+       if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+               /* Setup Inline Inbound */
+               rc = roc_nix_inl_inb_init(nix);
+               if (rc) {
+                       plt_err("Failed to initialize nix inline inb, rc=%d",
+                               rc);
+                       return rc;
+               }
+
+               /* By default, use the inline device for poll mode.
+                * This is overridden when event mode RQs are set up.
+                */
+               cnxk_nix_inb_mode_set(dev, true);
+       }
+
+       if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
+           dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+               struct plt_bitmap *bmap;
+               size_t bmap_sz;
+               void *mem;
+
+               /* Setup enough descriptors for all tx queues */
+               nix->outb_nb_desc = dev->outb.nb_desc;
+               nix->outb_nb_crypto_qs = dev->outb.nb_crypto_qs;
+
+               /* Setup Inline Outbound */
+               rc = roc_nix_inl_outb_init(nix);
+               if (rc) {
+                       plt_err("Failed to initialize nix inline outb, rc=%d",
+                               rc);
+                       goto cleanup;
+               }
+
+               dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
+
+               /* Skip the rest if DEV_TX_OFFLOAD_SECURITY is not enabled */
+               if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY))
+                       goto done;
+
+               rc = -ENOMEM;
+               /* Allocate a bitmap to alloc and free sa indexes */
+               bmap_sz = plt_bitmap_get_memory_footprint(dev->outb.max_sa);
+               mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
+               if (mem == NULL) {
+                       plt_err("Outbound SA bmap alloc failed");
+
+                       rc |= roc_nix_inl_outb_fini(nix);
+                       goto cleanup;
+               }
+
+               rc = -EIO;
+               bmap = plt_bitmap_init(dev->outb.max_sa, mem, bmap_sz);
+               if (!bmap) {
+                       plt_err("Outbound SA bmap init failed");
+
+                       rc |= roc_nix_inl_outb_fini(nix);
+                       plt_free(mem);
+                       goto cleanup;
+               }
+
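+               /* Initially mark all SA indices as free */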
+               for (i = 0; i < dev->outb.max_sa; i++)
+                       plt_bitmap_set(bmap, i);
+
+               dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix);
+               dev->outb.sa_bmap_mem = mem;
+               dev->outb.sa_bmap = bmap;
+       }
+
+done:
+       return 0;
+cleanup:
+       if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+               rc |= roc_nix_inl_inb_fini(nix);
+       return rc;
+}
+
+static int
+nix_security_release(struct cnxk_eth_dev *dev)
+{
+       struct rte_eth_dev *eth_dev = dev->eth_dev;
+       struct cnxk_eth_sec_sess *eth_sec, *tvar;
+       struct roc_nix *nix = &dev->nix;
+       int rc, ret = 0;
+
+       /* Cleanup Inline inbound */
+       if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+               /* Destroy inbound sessions */
+               tvar = NULL;
+               RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
+                       cnxk_eth_sec_ops.session_destroy(eth_dev,
+                                                        eth_sec->sess);
+
+               /* Clear lookup mem */
+               cnxk_nix_lookup_mem_sa_base_clear(dev);
+
+               rc = roc_nix_inl_inb_fini(nix);
+               if (rc)
+                       plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
+               ret |= rc;
+       }
+
+       /* Cleanup Inline outbound */
+       if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
+           dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+               /* Destroy outbound sessions */
+               tvar = NULL;
+               RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
+                       cnxk_eth_sec_ops.session_destroy(eth_dev,
+                                                        eth_sec->sess);
+
+               rc = roc_nix_inl_outb_fini(nix);
+               if (rc)
+                       plt_err("Failed to cleanup nix inline outb, rc=%d", rc);
+               ret |= rc;
+
+               plt_bitmap_free(dev->outb.sa_bmap);
+               plt_free(dev->outb.sa_bmap_mem);
+               dev->outb.sa_bmap = NULL;
+               dev->outb.sa_bmap_mem = NULL;
+       }
+
+       dev->inb.inl_dev = false;
+       roc_nix_inb_mode_set(nix, false);
+       dev->nb_rxq_sso = 0;
+       dev->inb.nb_sess = 0;
+       dev->outb.nb_sess = 0;
+       return ret;
+}
+
+static void
+nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
+{
+       struct rte_pktmbuf_pool_private *mbp_priv;
+       struct rte_eth_dev *eth_dev;
+       struct cnxk_eth_dev *dev;
+       uint32_t buffsz;
+
+       dev = rxq->dev;
+       eth_dev = dev->eth_dev;
+
+       /* Get rx buffer size */
+       mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
+       buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
+       if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
+               dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+               dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+       }
+}
+
+int
+nix_recalc_mtu(struct rte_eth_dev *eth_dev)
+{
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct cnxk_eth_rxq_sp *rxq;
+       int rc;
+
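+       /* The slow path struct is placed just before the fast path queue area */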
+       rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
+       /* Setup scatter mode if needed by jumbo */
+       nix_enable_mseg_on_jumbo(rxq);
+
+       rc = cnxk_nix_mtu_set(eth_dev, data->mtu);
+       if (rc)
+               plt_err("Failed to set default MTU size, rc=%d", rc);
+
+       return rc;
+}
+
+static int
+nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct cnxk_fc_cfg *fc = &dev->fc_cfg;
+       struct rte_eth_fc_conf fc_conf = {0};
+       int rc;
+
+       /* Both Rx & Tx flow control are enabled (RTE_FC_FULL) in HW by the
+        * AF driver; update this info in the PMD structure.
+        */
+       rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
+       if (rc)
+               goto exit;
+
+       fc->mode = fc_conf.mode;
+       fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
+                       (fc_conf.mode == RTE_FC_RX_PAUSE);
+       fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
+                       (fc_conf.mode == RTE_FC_TX_PAUSE);
+
+exit:
+       return rc;
+}
+
+static int
+nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct cnxk_fc_cfg *fc = &dev->fc_cfg;
+       struct rte_eth_fc_conf fc_cfg = {0};
+
+       if (roc_nix_is_vf_or_sdp(&dev->nix))
+               return 0;
+
+       fc_cfg.mode = fc->mode;
+
+       /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
+       if (roc_model_is_cn96_ax() &&
+           dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
+           (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
+               fc_cfg.mode =
+                               (fc_cfg.mode == RTE_FC_FULL ||
+                               fc_cfg.mode == RTE_FC_TX_PAUSE) ?
+                               RTE_FC_TX_PAUSE : RTE_FC_NONE;
+       }
+
+       return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
+}
+
 uint64_t
 cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
 {
@@ -55,7 +301,8 @@ cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
                                 offsetof(struct rte_mbuf, data_off) !=
                         6);
        mb_def.nb_segs = 1;
-       mb_def.data_off = RTE_PKTMBUF_HEADROOM;
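+       /* When PTP is enabled, start data after the HW-added Rx timestamp */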
+       mb_def.data_off = RTE_PKTMBUF_HEADROOM +
+                         (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
        mb_def.port = port_id;
        rte_mbuf_refcnt_set(&mb_def, 1);
 
@@ -66,6 +313,111 @@ cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
        return *tmp;
 }
 
+static inline uint8_t
+nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
+{
+       /*
+        * A maximum of three segments can be supported with W8; choose
+        * NIX_MAXSQESZ_W16 for multi-segment offload.
+        */
+       if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+               return NIX_MAXSQESZ_W16;
+       else
+               return NIX_MAXSQESZ_W8;
+}
+
+int
+cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+                       uint16_t nb_desc, uint16_t fp_tx_q_sz,
+                       const struct rte_eth_txconf *tx_conf)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+       struct cnxk_eth_txq_sp *txq_sp;
+       struct roc_nix_sq *sq;
+       size_t txq_sz;
+       int rc;
+
+       /* Free memory prior to re-allocation if needed. */
+       if (eth_dev->data->tx_queues[qid] != NULL) {
+               plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
+               dev_ops->tx_queue_release(eth_dev, qid);
+               eth_dev->data->tx_queues[qid] = NULL;
+       }
+
+       /* When Tx Security offload is enabled, increase tx desc count by
+        * max possible outbound desc count.
+        */
+       if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+               nb_desc += dev->outb.nb_desc;
+
+       /* Setup ROC SQ */
+       sq = &dev->sqs[qid];
+       sq->qid = qid;
+       sq->nb_desc = nb_desc;
+       sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
+
+       rc = roc_nix_sq_init(&dev->nix, sq);
+       if (rc) {
+               plt_err("Failed to init sq=%d, rc=%d", qid, rc);
+               return rc;
+       }
+
+       rc = -ENOMEM;
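+       /* Allocate the slow path struct followed by the fast path area */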
+       txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
+       txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
+       if (!txq_sp) {
+               plt_err("Failed to alloc tx queue mem");
+               rc |= roc_nix_sq_fini(sq);
+               return rc;
+       }
+
+       txq_sp->dev = dev;
+       txq_sp->qid = qid;
+       txq_sp->qconf.conf.tx = *tx_conf;
+       /* Queue config should reflect global offloads */
+       txq_sp->qconf.conf.tx.offloads = dev->tx_offloads;
+       txq_sp->qconf.nb_desc = nb_desc;
+
+       plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
+                   " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
+                   qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
+                   sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);
+
+       /* Store start of fast path area */
+       eth_dev->data->tx_queues[qid] = txq_sp + 1;
+       eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+       return 0;
+}
+
+static void
+cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+       void *txq = eth_dev->data->tx_queues[qid];
+       struct cnxk_eth_txq_sp *txq_sp;
+       struct cnxk_eth_dev *dev;
+       struct roc_nix_sq *sq;
+       int rc;
+
+       if (!txq)
+               return;
+
+       txq_sp = cnxk_eth_txq_to_sp(txq);
+
+       dev = txq_sp->dev;
+
+       plt_nix_dbg("Releasing txq %u", qid);
+
+       /* Cleanup ROC SQ */
+       sq = &dev->sqs[qid];
+       rc = roc_nix_sq_fini(sq);
+       if (rc)
+               plt_err("Failed to cleanup sq, rc=%d", rc);
+
+       /* Finally free */
+       plt_free(txq_sp);
+}
+
 int
 cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                        uint16_t nb_desc, uint16_t fp_rx_q_sz,
@@ -73,6 +425,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                        struct rte_mempool *mp)
 {
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct roc_nix *nix = &dev->nix;
        struct cnxk_eth_rxq_sp *rxq_sp;
        struct rte_mempool_ops *ops;
        const char *platform_ops;
@@ -106,10 +459,23 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
 
                plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
-               dev_ops->rx_queue_release(eth_dev->data->rx_queues[qid]);
+               dev_ops->rx_queue_release(eth_dev, qid);
                eth_dev->data->rx_queues[qid] = NULL;
        }
 
+       /* Clamp up the CQ limit to the size of the packet pool aura for LBK
+        * to avoid meta packet drops, as LBK does not currently support
+        * backpressure.
+        */
+       if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
+               uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
+
+               /* Use current RQ's aura limit if inl rq is not available */
+               if (!pkt_pool_limit)
+                       pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id);
+               nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
+       }
+
        /* Setup ROC CQ */
        cq = &dev->cqs[qid];
        cq->qid = qid;
@@ -135,6 +501,10 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
        rq->later_skip = sizeof(struct rte_mbuf);
        rq->lpb_size = mp->elt_size;
 
+       /* Enable inline IPsec on the RQ; this is not used for poll mode */
+       if (roc_nix_inl_inb_is_enabled(nix))
+               rq->ipsech_ena = true;
+
        rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
        if (rc) {
                plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
@@ -154,9 +524,18 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
        rxq_sp->dev = dev;
        rxq_sp->qid = qid;
        rxq_sp->qconf.conf.rx = *rx_conf;
+       /* Queue config should reflect global offloads */
+       rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
        rxq_sp->qconf.nb_desc = nb_desc;
        rxq_sp->qconf.mp = mp;
 
+       if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+               /* Setup rq reference for inline dev if present */
+               rc = roc_nix_inl_dev_rq_get(rq);
+               if (rc)
+                       goto free_mem;
+       }
+
        plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
                    cq->nb_desc);
 
@@ -164,7 +543,21 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
        eth_dev->data->rx_queues[qid] = rxq_sp + 1;
        eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
 
+       /* Calculate the delta and frequency multiplier between the PTP HI
+        * clock and TSC; these are needed to derive the raw clock value
+        * from the TSC counter. The read_clock eth op returns the raw clock.
+        */
+       if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
+               rc = cnxk_nix_tsc_convert(dev);
+               if (rc) {
+                       plt_err("Failed to calculate delta and freq mult");
+                       goto rq_fini;
+               }
+       }
+
        return 0;
+free_mem:
+       plt_free(rxq_sp);
 rq_fini:
        rc |= roc_nix_rq_fini(rq);
 cq_fini:
@@ -174,13 +567,13 @@ fail:
 }
 
 static void
-cnxk_nix_rx_queue_release(void *rxq)
+cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 {
+       void *rxq = eth_dev->data->rx_queues[qid];
        struct cnxk_eth_rxq_sp *rxq_sp;
        struct cnxk_eth_dev *dev;
        struct roc_nix_rq *rq;
        struct roc_nix_cq *cq;
-       uint16_t qid;
        int rc;
 
        if (!rxq)
@@ -188,12 +581,15 @@ cnxk_nix_rx_queue_release(void *rxq)
 
        rxq_sp = cnxk_eth_rxq_to_sp(rxq);
        dev = rxq_sp->dev;
-       qid = rxq_sp->qid;
+       rq = &dev->rqs[qid];
 
        plt_nix_dbg("Releasing rxq %u", qid);
 
+       /* Release rq reference for inline dev if present */
+       if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+               roc_nix_inl_dev_rq_put(rq);
+
        /* Cleanup ROC RQ */
-       rq = &dev->rqs[qid];
        rc = roc_nix_rq_fini(rq);
        if (rc)
                plt_err("Failed to cleanup rq, rc=%d", rc);
@@ -229,8 +625,10 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
 
        dev->ethdev_rss_hf = ethdev_rss;
 
-       if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+       if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+           dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
                flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
+       }
 
        if (ethdev_rss & ETH_RSS_C_VLAN)
                flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
@@ -351,7 +749,7 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
                txq_sp = cnxk_eth_txq_to_sp(txq[i]);
                memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
                tx_qconf[i].valid = true;
-               dev_ops->tx_queue_release(txq[i]);
+               dev_ops->tx_queue_release(eth_dev, i);
                eth_dev->data->tx_queues[i] = NULL;
        }
 
@@ -365,7 +763,7 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
                rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
                memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
                rx_qconf[i].valid = true;
-               dev_ops->rx_queue_release(rxq[i]);
+               dev_ops->rx_queue_release(eth_dev, i);
                eth_dev->data->rx_queues[i] = NULL;
        }
 
@@ -387,7 +785,6 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
        struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
        struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
        int rc, i, nb_rxq, nb_txq;
-       void **txq, **rxq;
 
        nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
        nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
@@ -422,9 +819,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
                                             &tx_qconf[i].conf.tx);
                if (rc) {
                        plt_err("Failed to setup tx queue rc=%d", rc);
-                       txq = eth_dev->data->tx_queues;
                        for (i -= 1; i >= 0; i--)
-                               dev_ops->tx_queue_release(txq[i]);
+                               dev_ops->tx_queue_release(eth_dev, i);
                        goto fail;
                }
        }
@@ -440,9 +836,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
                                             rx_qconf[i].mp);
                if (rc) {
                        plt_err("Failed to setup rx queue rc=%d", rc);
-                       rxq = eth_dev->data->rx_queues;
                        for (i -= 1; i >= 0; i--)
-                               dev_ops->rx_queue_release(rxq[i]);
+                               dev_ops->rx_queue_release(eth_dev, i);
                        goto tx_queue_release;
                }
        }
@@ -453,9 +848,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
        return 0;
 
 tx_queue_release:
-       txq = eth_dev->data->tx_queues;
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
-               dev_ops->tx_queue_release(txq[i]);
+               dev_ops->tx_queue_release(eth_dev, i);
 fail:
        if (tx_qconf)
                free(tx_qconf);
@@ -545,6 +939,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
        struct rte_eth_rxmode *rxmode = &conf->rxmode;
        struct rte_eth_txmode *txmode = &conf->txmode;
        char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
+       struct roc_nix_fc_cfg fc_cfg = {0};
        struct roc_nix *nix = &dev->nix;
        struct rte_ether_addr *ea;
        uint8_t nb_rxq, nb_txq;
@@ -596,6 +991,12 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
                rc = nix_store_queue_cfg_and_then_release(eth_dev);
                if (rc)
                        goto fail_configure;
+
+               /* Cleanup security support */
+               rc = nix_security_release(dev);
+               if (rc)
+                       goto fail_configure;
+
                roc_nix_tm_fini(nix);
                roc_nix_lf_free(nix);
        }
@@ -614,6 +1015,15 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
                   ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
                   ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
 
+       if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+               rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
+               /* Disable drop_re if Rx security offload is enabled and the
+                * platform does not support it.
+                */
+               if (dev->ipsecd_drop_re_dis)
+                       rx_cfg &= ~(ROC_NIX_LF_RX_CFG_DROP_RE);
+       }
+
        nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
        nb_txq = RTE_MAX(data->nb_tx_queues, 1);
 
@@ -624,6 +1034,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
                goto fail_configure;
        }
 
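+       /* Save the NIX base channel for NPC flow rule creation */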
+       dev->npc.channel = roc_nix_get_base_chan(nix);
+
        nb_rxq = data->nb_rx_queues;
        nb_txq = data->nb_tx_queues;
        rc = -ENOMEM;
@@ -658,11 +1070,18 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
        roc_nix_err_intr_ena_dis(nix, true);
        roc_nix_ras_intr_ena_dis(nix, true);
 
-       if (nix->rx_ptp_ena) {
+       if (nix->rx_ptp_ena &&
+           dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) {
                plt_err("Both PTP and switch header enabled");
                goto free_nix_lf;
        }
 
+       rc = roc_nix_switch_hdr_set(nix, dev->npc.switch_header_type);
+       if (rc) {
+               plt_err("Failed to enable switch type nix_lf rc=%d", rc);
+               goto free_nix_lf;
+       }
+
        /* Setup LSO if needed */
        rc = nix_lso_fmt_setup(dev);
        if (rc) {
@@ -726,6 +1145,27 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
                goto cq_fini;
        }
 
+       /* Init flow control configuration */
+       fc_cfg.cq_cfg_valid = false;
+       fc_cfg.rxchan_cfg.enable = true;
+       rc = roc_nix_fc_config_set(nix, &fc_cfg);
+       if (rc) {
+               plt_err("Failed to initialize flow control rc=%d", rc);
+               goto cq_fini;
+       }
+
+       /* Update flow control configuration to PMD */
+       rc = nix_init_flow_ctrl_config(eth_dev);
+       if (rc) {
+               plt_err("Failed to initialize flow control rc=%d", rc);
+               goto cq_fini;
+       }
+
+       /* Setup Inline security support */
+       rc = nix_security_setup(dev);
+       if (rc)
+               goto cq_fini;
+
        /*
         * Restore the queue config when the device is reconfigured and the
         * application has not invoked queue setup again.
@@ -733,7 +1173,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
        if (dev->configured == 1) {
                rc = nix_restore_queue_cfg(eth_dev);
                if (rc)
-                       goto cq_fini;
+                       goto sec_release;
        }
 
        /* Update the mac address */
@@ -755,6 +1195,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
        dev->nb_txq = data->nb_tx_queues;
        return 0;
 
+sec_release:
+       rc |= nix_security_release(dev);
 cq_fini:
        roc_nix_unregister_cq_irqs(nix);
 q_irq_fini:
@@ -769,23 +1211,309 @@ fail_configure:
        return rc;
 }
 
+int
+cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct roc_nix_sq *sq = &dev->sqs[qid];
+       int rc = -EINVAL;
+
+       if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
+               return 0;
+
+       rc = roc_nix_tm_sq_aura_fc(sq, true);
+       if (rc) {
+               plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
+               goto done;
+       }
+
+       data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
+done:
+       return rc;
+}
+
+int
+cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct roc_nix_sq *sq = &dev->sqs[qid];
+       int rc;
+
+       if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
+               return 0;
+
+       rc = roc_nix_tm_sq_aura_fc(sq, false);
+       if (rc) {
+               plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
+                       rc);
+               goto done;
+       }
+
+       data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+done:
+       return rc;
+}
+
+static int
+cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct roc_nix_rq *rq = &dev->rqs[qid];
+       int rc;
+
+       if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
+               return 0;
+
+       rc = roc_nix_rq_ena_dis(rq, true);
+       if (rc) {
+               plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
+               goto done;
+       }
+
+       data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
+done:
+       return rc;
+}
+
+static int
+cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct roc_nix_rq *rq = &dev->rqs[qid];
+       int rc;
+
+       if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
+               return 0;
+
+       rc = roc_nix_rq_ena_dis(rq, false);
+       if (rc) {
+               plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
+               goto done;
+       }
+
+       data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+done:
+       return rc;
+}
+
+static int
+cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+       struct rte_mbuf *rx_pkts[32];
+       struct rte_eth_link link;
+       int count, i, j, rc;
+       void *rxq;
+
+       /* Disable switch hdr pkind */
+       roc_nix_switch_hdr_set(&dev->nix, 0);
+
+       /* Stop link change events */
+       if (!roc_nix_is_vf_or_sdp(&dev->nix))
+               roc_nix_mac_link_event_start_stop(&dev->nix, false);
+
+       /* Disable Rx via NPC */
+       roc_nix_npc_rx_ena_dis(&dev->nix, false);
+
+       /* Stop rx queues and free up pkts pending */
+       for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+               rc = dev_ops->rx_queue_stop(eth_dev, i);
+               if (rc)
+                       continue;
+
+               rxq = eth_dev->data->rx_queues[i];
+               count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
+               while (count) {
+                       for (j = 0; j < count; j++)
+                               rte_pktmbuf_free(rx_pkts[j]);
+                       count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
+               }
+       }
+
+       /* Stop tx queues  */
+       for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+               dev_ops->tx_queue_stop(eth_dev, i);
+
+       /* Bring down link status internally */
+       memset(&link, 0, sizeof(link));
+       rte_eth_linkstatus_set(eth_dev, &link);
+
+       return 0;
+}
+
+int
+cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       int rc, i;
+
+       if (eth_dev->data->nb_rx_queues != 0 && !dev->ptp_en) {
+               rc = nix_recalc_mtu(eth_dev);
+               if (rc)
+                       return rc;
+       }
+
+       /* Start rx queues */
+       for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+               rc = cnxk_nix_rx_queue_start(eth_dev, i);
+               if (rc)
+                       return rc;
+       }
+
+       /* Start tx queues  */
+       for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+               rc = cnxk_nix_tx_queue_start(eth_dev, i);
+               if (rc)
+                       return rc;
+       }
+
+       /* Update Flow control configuration */
+       rc = nix_update_flow_ctrl_config(eth_dev);
+       if (rc) {
+               plt_err("Failed to enable flow control. error code(%d)", rc);
+               return rc;
+       }
+
+       /* Enable Rx in NPC */
+       rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
+       if (rc) {
+               plt_err("Failed to enable NPC rx %d", rc);
+               return rc;
+       }
+
+       cnxk_nix_toggle_flag_link_cfg(dev, true);
+
+       /* Start link change events */
+       if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
+               rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
+               if (rc) {
+                       plt_err("Failed to start cgx link event %d", rc);
+                       goto rx_disable;
+               }
+       }
+
+       /* Enable PTP if it is requested by the user or already
+        * enabled on the PF owning this VF.
+        */
+       memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
+       if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
+               cnxk_eth_dev_ops.timesync_enable(eth_dev);
+       else
+               cnxk_eth_dev_ops.timesync_disable(eth_dev);
+
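+       /* Register dynamic mbuf field and flag to carry Rx timestamps */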
+       if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+               rc = rte_mbuf_dyn_rx_timestamp_register
+                       (&dev->tstamp.tstamp_dynfield_offset,
+                        &dev->tstamp.rx_tstamp_dynflag);
+               if (rc != 0) {
+                       plt_err("Failed to register Rx timestamp field/flag");
+                       goto rx_disable;
+               }
+       }
+
+       cnxk_nix_toggle_flag_link_cfg(dev, false);
+
+       return 0;
+
+rx_disable:
+       roc_nix_npc_rx_ena_dis(&dev->nix, false);
+       cnxk_nix_toggle_flag_link_cfg(dev, false);
+       return rc;
+}
+
+static int cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev);
+static int cnxk_nix_dev_close(struct rte_eth_dev *eth_dev);
+
 /* CNXK platform independent eth dev ops */
 struct eth_dev_ops cnxk_eth_dev_ops = {
+       .mtu_set = cnxk_nix_mtu_set,
+       .mac_addr_add = cnxk_nix_mac_addr_add,
+       .mac_addr_remove = cnxk_nix_mac_addr_del,
+       .mac_addr_set = cnxk_nix_mac_addr_set,
        .dev_infos_get = cnxk_nix_info_get,
        .link_update = cnxk_nix_link_update,
+       .tx_queue_release = cnxk_nix_tx_queue_release,
        .rx_queue_release = cnxk_nix_rx_queue_release,
+       .dev_stop = cnxk_nix_dev_stop,
+       .dev_close = cnxk_nix_dev_close,
+       .dev_reset = cnxk_nix_dev_reset,
+       .tx_queue_start = cnxk_nix_tx_queue_start,
+       .rx_queue_start = cnxk_nix_rx_queue_start,
+       .rx_queue_stop = cnxk_nix_rx_queue_stop,
+       .dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
+       .promiscuous_enable = cnxk_nix_promisc_enable,
+       .promiscuous_disable = cnxk_nix_promisc_disable,
+       .allmulticast_enable = cnxk_nix_allmulticast_enable,
+       .allmulticast_disable = cnxk_nix_allmulticast_disable,
+       .rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
+       .tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
+       .flow_ctrl_get = cnxk_nix_flow_ctrl_get,
+       .flow_ctrl_set = cnxk_nix_flow_ctrl_set,
+       .dev_set_link_up = cnxk_nix_set_link_up,
+       .dev_set_link_down = cnxk_nix_set_link_down,
+       .get_module_info = cnxk_nix_get_module_info,
+       .get_module_eeprom = cnxk_nix_get_module_eeprom,
+       .rx_queue_intr_enable = cnxk_nix_rx_queue_intr_enable,
+       .rx_queue_intr_disable = cnxk_nix_rx_queue_intr_disable,
+       .pool_ops_supported = cnxk_nix_pool_ops_supported,
+       .queue_stats_mapping_set = cnxk_nix_queue_stats_mapping,
+       .stats_get = cnxk_nix_stats_get,
+       .stats_reset = cnxk_nix_stats_reset,
+       .xstats_get = cnxk_nix_xstats_get,
+       .xstats_get_names = cnxk_nix_xstats_get_names,
+       .xstats_reset = cnxk_nix_xstats_reset,
+       .xstats_get_by_id = cnxk_nix_xstats_get_by_id,
+       .xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id,
+       .fw_version_get = cnxk_nix_fw_version_get,
+       .rxq_info_get = cnxk_nix_rxq_info_get,
+       .txq_info_get = cnxk_nix_txq_info_get,
+       .tx_done_cleanup = cnxk_nix_tx_done_cleanup,
+       .flow_ops_get = cnxk_nix_flow_ops_get,
+       .get_reg = cnxk_nix_dev_get_reg,
+       .timesync_read_rx_timestamp = cnxk_nix_timesync_read_rx_timestamp,
+       .timesync_read_tx_timestamp = cnxk_nix_timesync_read_tx_timestamp,
+       .timesync_read_time = cnxk_nix_timesync_read_time,
+       .timesync_write_time = cnxk_nix_timesync_write_time,
+       .timesync_adjust_time = cnxk_nix_timesync_adjust_time,
+       .read_clock = cnxk_nix_read_clock,
+       .reta_update = cnxk_nix_reta_update,
+       .reta_query = cnxk_nix_reta_query,
+       .rss_hash_update = cnxk_nix_rss_hash_update,
+       .rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
+       .set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
+       .set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
+       .tm_ops_get = cnxk_nix_tm_ops_get,
+       .mtr_ops_get = cnxk_nix_mtr_ops_get,
 };
 
 static int
 cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
 {
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct rte_security_ctx *sec_ctx;
        struct roc_nix *nix = &dev->nix;
        struct rte_pci_device *pci_dev;
        int rc, max_entries;
 
        eth_dev->dev_ops = &cnxk_eth_dev_ops;
 
+       /* Alloc security context */
+       sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0);
+       if (!sec_ctx)
+               return -ENOMEM;
+       sec_ctx->device = eth_dev;
+       sec_ctx->ops = &cnxk_eth_sec_ops;
+       sec_ctx->flags =
+               (RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA);
+       eth_dev->security_ctx = sec_ctx;
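+       /* Lists of inline inbound and outbound security sessions */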
+       TAILQ_INIT(&dev->inb.list);
+       TAILQ_INIT(&dev->outb.list);
+
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
@@ -802,6 +1530,7 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
 
        /* Initialize base roc nix */
        nix->pci_dev = pci_dev;
+       nix->hw_vlan_ins = true;
        rc = roc_nix_dev_init(nix);
        if (rc) {
                plt_err("Failed to initialize roc nix rc=%d", rc);
@@ -811,8 +1540,13 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
        /* Register up msg callbacks */
        roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);
 
+       /* Register link info get callback */
+       roc_nix_mac_link_info_get_cb_register(nix,
+                                             cnxk_eth_dev_link_status_get_cb);
+
        dev->eth_dev = eth_dev;
        dev->configured = 0;
+       dev->ptype_disable = 0;
 
        /* For VFs, the returned max_entries will be 0, but one entry must
         * be allocated to keep the default MAC address, so set it to 1.
@@ -837,6 +1571,7 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        dev->max_mac_entries = max_entries;
+       dev->dmac_filter_count = 1;
 
        /* Get mac address */
        rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
@@ -866,6 +1601,11 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
        dev->speed_capa = nix_get_speed_capa(dev);
 
        /* Initialize roc npc */
+       dev->npc.roc_nix = nix;
+       rc = roc_npc_init(&dev->npc);
+       if (rc)
+               goto free_mac_addrs;
+
        plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
                    " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
                    eth_dev->data->port_id, roc_nix_get_pf(nix),
@@ -883,13 +1623,16 @@ error:
 }
 
 static int
-cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
+cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 {
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
        struct roc_nix *nix = &dev->nix;
        int rc, i;
 
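+       /* Free the security context allocated at dev init */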
+       plt_free(eth_dev->security_ctx);
+       eth_dev->security_ctx = NULL;
+
        /* Nothing to be done for secondary processes */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
@@ -899,23 +1642,34 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
 
        roc_nix_npc_rx_ena_dis(nix, false);
 
+       /* Disable and free rte_flow entries */
+       roc_npc_fini(&dev->npc);
+
        /* Disable link status events */
        roc_nix_mac_link_event_start_stop(nix, false);
 
+       /* Unregister the link update op; this is required to stop VFs from
+        * receiving link status updates on the exit path.
+        */
+       roc_nix_mac_link_cb_unregister(nix);
+
        /* Free up SQs */
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
-               dev_ops->tx_queue_release(eth_dev->data->tx_queues[i]);
+               dev_ops->tx_queue_release(eth_dev, i);
                eth_dev->data->tx_queues[i] = NULL;
        }
        eth_dev->data->nb_tx_queues = 0;
 
        /* Free up RQ's and CQ's */
        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
-               dev_ops->rx_queue_release(eth_dev->data->rx_queues[i]);
+               dev_ops->rx_queue_release(eth_dev, i);
                eth_dev->data->rx_queues[i] = NULL;
        }
        eth_dev->data->nb_rx_queues = 0;
 
+       /* Free security resources */
+       nix_security_release(dev);
+
        /* Free tm resources */
        roc_nix_tm_fini(nix);
 
@@ -937,14 +1691,11 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
        rte_free(eth_dev->data->mac_addrs);
        eth_dev->data->mac_addrs = NULL;
 
-       /* Check if mbox close is needed */
-       if (!mbox_close)
-               return 0;
-
        rc = roc_nix_dev_fini(nix);
        /* Can be freed later by PMD if NPA LF is in use */
        if (rc == -EAGAIN) {
-               eth_dev->data->dev_private = NULL;
+               if (!reset)
+                       eth_dev->data->dev_private = NULL;
                return 0;
        } else if (rc) {
                plt_err("Failed in nix dev fini, rc=%d", rc);
@@ -953,6 +1704,25 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
        return rc;
 }
 
+static int
+cnxk_nix_dev_close(struct rte_eth_dev *eth_dev)
+{
+       cnxk_eth_dev_uninit(eth_dev, false);
+       return 0;
+}
+
+static int
+cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev)
+{
+       int rc;
+
+       rc = cnxk_eth_dev_uninit(eth_dev, true);
+       if (rc)
+               return rc;
+
+       return cnxk_eth_dev_init(eth_dev);
+}
+
 int
 cnxk_nix_remove(struct rte_pci_device *pci_dev)
 {
@@ -963,7 +1733,7 @@ cnxk_nix_remove(struct rte_pci_device *pci_dev)
        eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
        if (eth_dev) {
                /* Cleanup eth dev */
-               rc = cnxk_eth_dev_uninit(eth_dev, true);
+               rc = cnxk_eth_dev_uninit(eth_dev, false);
                if (rc)
                        return rc;