mbuf: add namespace to offload flags
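
This cumulative view folds the DPDK offload-flag namespace rename into the
octeontx2 driver: the old unprefixed ethdev, mbuf and mempool names become
their RTE_-prefixed equivalents (alongside surrounding driver churn such as
the queue-release signature change and NIX/NPA context locking). A hedged
sketch of the rename pattern only; the authoritative aliases live in
rte_ethdev.h, rte_mbuf_core.h and rte_mempool.h:

    /* Illustrative old -> new mapping, not part of this patch */
    #define DEV_RX_OFFLOAD_TIMESTAMP   RTE_ETH_RX_OFFLOAD_TIMESTAMP
    #define DEV_TX_OFFLOAD_TCP_TSO     RTE_ETH_TX_OFFLOAD_TCP_TSO
    #define ETH_MQ_RX_RSS              RTE_ETH_MQ_RX_RSS
    #define PKT_TX_TCP_CKSUM           RTE_MBUF_F_TX_TCP_CKSUM
    #define MEMPOOL_F_NO_SPREAD        RTE_MEMPOOL_F_NO_SPREAD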
diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index f26d42b..4f1c0b9 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -4,7 +4,7 @@
 
 #include <inttypes.h>
 
-#include <rte_ethdev_pci.h>
+#include <ethdev_pci.h>
 #include <rte_io.h>
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
@@ -21,7 +21,7 @@ nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
 
        if (otx2_dev_is_vf(dev) ||
            dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG)
-               capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+               capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
        return capa;
 }
@@ -33,16 +33,17 @@ nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
 
        /* TSO not supported for earlier chip revisions */
        if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
-               capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
-                         DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-                         DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-                         DEV_TX_OFFLOAD_GRE_TNL_TSO);
+               capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
+                         RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+                         RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+                         RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
        return capa;
 }
 
 static const struct otx2_dev_ops otx2_dev_ops = {
        .link_status_update = otx2_eth_dev_link_status_update,
-       .ptp_info_update = otx2_eth_dev_ptp_info_update
+       .ptp_info_update = otx2_eth_dev_ptp_info_update,
+       .link_status_get = otx2_eth_dev_link_status_get,
 };
 
 static int
@@ -65,8 +66,8 @@ nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
        req->npa_func = otx2_npa_pf_func_get();
        req->sso_func = otx2_sso_pf_func_get();
        req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
-       if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-                        DEV_RX_OFFLOAD_UDP_CKSUM)) {
+       if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+                        RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
                req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
                req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
        }
@@ -115,6 +116,23 @@ nix_lf_switch_header_type_enable(struct otx2_eth_dev *dev, bool enable)
        /* Notify AF about higig2 config */
        req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
        req->mode = dev->npc_flow.switch_header_type;
+       if (dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_90B) {
+               req->mode = OTX2_PRIV_FLAGS_CUSTOM;
+               req->pkind = NPC_RX_CHLEN90B_PKIND;
+       } else if (dev->npc_flow.switch_header_type ==
+                  OTX2_PRIV_FLAGS_CH_LEN_24B) {
+               req->mode = OTX2_PRIV_FLAGS_CUSTOM;
+               req->pkind = NPC_RX_CHLEN24B_PKIND;
+       } else if (dev->npc_flow.switch_header_type ==
+                  OTX2_PRIV_FLAGS_EXDSA) {
+               req->mode = OTX2_PRIV_FLAGS_CUSTOM;
+               req->pkind = NPC_RX_EXDSA_PKIND;
+       } else if (dev->npc_flow.switch_header_type ==
+                  OTX2_PRIV_FLAGS_VLAN_EXDSA) {
+               req->mode = OTX2_PRIV_FLAGS_CUSTOM;
+               req->pkind = NPC_RX_VLAN_EXDSA_PKIND;
+       }
+
        if (enable == 0)
                req->mode = OTX2_PRIV_FLAGS_DEFAULT;
        req->dir = PKIND_RX;
@@ -123,6 +141,10 @@ nix_lf_switch_header_type_enable(struct otx2_eth_dev *dev, bool enable)
                return rc;
        req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
        req->mode = dev->npc_flow.switch_header_type;
+       if (dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_90B ||
+           dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_24B)
+               req->mode = OTX2_PRIV_FLAGS_DEFAULT;
+
        if (enable == 0)
                req->mode = OTX2_PRIV_FLAGS_DEFAULT;
        req->dir = PKIND_TX;
@@ -292,8 +314,7 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
                                      NIX_CQ_ALIGN, dev->node);
        if (rz == NULL) {
                otx2_err("Failed to allocate mem for cq hw ring");
-               rc = -ENOMEM;
-               goto fail;
+               return -ENOMEM;
        }
        memset(rz->addr, 0, rz->len);
        rxq->desc = (uintptr_t)rz->addr;
@@ -342,7 +363,7 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
        rc = otx2_mbox_process(mbox);
        if (rc) {
                otx2_err("Failed to init cq context");
-               goto fail;
+               return rc;
        }
 
        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
@@ -352,7 +373,7 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
 
        aq->rq.sso_ena = 0;
 
-       if (rxq->offloads & DEV_RX_OFFLOAD_SECURITY)
+       if (rxq->offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
                aq->rq.ipsech_ena = 1;
 
        aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
@@ -367,10 +388,7 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
        aq->rq.first_skip = first_skip;
        aq->rq.later_skip = (sizeof(struct rte_mbuf) / 8);
        aq->rq.flow_tagw = 32; /* 32-bits */
-       aq->rq.lpb_sizem1 = rte_pktmbuf_data_room_size(mp);
-       aq->rq.lpb_sizem1 += rte_pktmbuf_priv_size(mp);
-       aq->rq.lpb_sizem1 += sizeof(struct rte_mbuf);
-       aq->rq.lpb_sizem1 /= 8;
+       aq->rq.lpb_sizem1 = mp->elt_size / 8;
        aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
        aq->rq.ena = 1;
        aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
@@ -384,12 +402,44 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
        rc = otx2_mbox_process(mbox);
        if (rc) {
                otx2_err("Failed to init rq context");
-               goto fail;
+               return rc;
+       }
+
+       if (dev->lock_rx_ctx) {
+               aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+               aq->qidx = qid;
+               aq->ctype = NIX_AQ_CTYPE_CQ;
+               aq->op = NIX_AQ_INSTOP_LOCK;
+
+               aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+               if (!aq) {
+                       /* The shared memory buffer can be full.
+                        * Flush it and retry
+                        */
+                       otx2_mbox_msg_send(mbox, 0);
+                       rc = otx2_mbox_wait_for_rsp(mbox, 0);
+                       if (rc < 0) {
+                               otx2_err("Failed to LOCK cq context");
+                               return rc;
+                       }
+
+                       aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+                       if (!aq) {
+                               otx2_err("Failed to LOCK rq context");
+                               return -ENOMEM;
+                       }
+               }
+               aq->qidx = qid;
+               aq->ctype = NIX_AQ_CTYPE_RQ;
+               aq->op = NIX_AQ_INSTOP_LOCK;
+               rc = otx2_mbox_process(mbox);
+               if (rc < 0) {
+                       otx2_err("Failed to LOCK rq context");
+                       return rc;
+               }
        }
 
        return 0;
-fail:
-       return rc;
 }
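
A hedged sketch of the alloc-or-flush-and-retry idiom used for the LOCK
messages above, factored into a standalone helper; the helper name is
hypothetical, and only the mbox calls already used in this file are assumed:

    static struct nix_aq_enq_req *
    nix_aq_enq_alloc_retry(struct otx2_mbox *mbox)
    {
            struct nix_aq_enq_req *aq;

            aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
            if (aq)
                    return aq;

            /* Shared memory buffer is full: flush queued messages and
             * retry the allocation once.
             */
            otx2_mbox_msg_send(mbox, 0);
            if (otx2_mbox_wait_for_rsp(mbox, 0) < 0)
                    return NULL;

            return otx2_mbox_alloc_msg_nix_aq_enq(mbox);
    }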
 
 static int
@@ -436,6 +486,40 @@ nix_cq_rq_uninit(struct rte_eth_dev *eth_dev, struct otx2_eth_rxq *rxq)
                return rc;
        }
 
+       if (dev->lock_rx_ctx) {
+               aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+               aq->qidx = rxq->rq;
+               aq->ctype = NIX_AQ_CTYPE_CQ;
+               aq->op = NIX_AQ_INSTOP_UNLOCK;
+
+               aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+               if (!aq) {
+                       /* The shared memory buffer can be full.
+                        * Flush it and retry
+                        */
+                       otx2_mbox_msg_send(mbox, 0);
+                       rc = otx2_mbox_wait_for_rsp(mbox, 0);
+                       if (rc < 0) {
+                               otx2_err("Failed to UNLOCK cq context");
+                               return rc;
+                       }
+
+                       aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+                       if (!aq) {
+                               otx2_err("Failed to UNLOCK rq context");
+                               return -ENOMEM;
+                       }
+               }
+               aq->qidx = rxq->rq;
+               aq->ctype = NIX_AQ_CTYPE_RQ;
+               aq->op = NIX_AQ_INSTOP_UNLOCK;
+               rc = otx2_mbox_process(mbox);
+               if (rc < 0) {
+                       otx2_err("Failed to UNLOCK rq context");
+                       return rc;
+               }
+       }
+
        return 0;
 }
 
@@ -471,16 +555,17 @@ otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id)
 }
 
 static void
-otx2_nix_rx_queue_release(void *rx_queue)
+otx2_nix_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct otx2_eth_rxq *rxq = rx_queue;
+       struct otx2_eth_rxq *rxq = dev->data->rx_queues[qid];
 
        if (!rxq)
                return;
 
        otx2_nix_dbg("Releasing rxq %u", rxq->rq);
        nix_cq_rq_uninit(rxq->eth_dev, rxq);
-       rte_free(rx_queue);
+       rte_free(rxq);
+       dev->data->rx_queues[qid] = NULL;
 }
 
 static int
@@ -524,8 +609,8 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
        /* Free memory prior to re-allocation if needed */
        if (eth_dev->data->rx_queues[rq] != NULL) {
                otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
-               otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);
-               eth_dev->data->rx_queues[rq] = NULL;
+               otx2_nix_rx_queue_release(eth_dev, rq);
+               rte_eth_dma_zone_free(eth_dev, "cq", rq);
        }
 
        offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
@@ -556,6 +641,8 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
        rxq->lookup_mem = otx2_nix_fastpath_lookup_mem_get();
        rxq->tstamp = &dev->tstamp;
 
+       eth_dev->data->rx_queues[rq] = rxq;
+
        /* Alloc completion queue */
        rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);
        if (rc) {
@@ -572,14 +659,13 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
        otx2_nix_dbg("rq=%d pool=%s qsize=%d nb_desc=%d->%d",
                     rq, mp->name, qsize, nb_desc, rxq->qlen);
 
-       eth_dev->data->rx_queues[rq] = rxq;
        eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;
 
        /* Calculate the delta and frequency multiplier between the PTP HI
         * clock and TSC. These are needed to derive the raw clock value from
         * the TSC counter; the read_clock eth op returns the raw clock value.
         */
-       if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+       if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
            otx2_ethdev_is_ptp_en(dev)) {
                rc = otx2_nix_raw_clock_tsc_conv(dev);
                if (rc) {
@@ -588,10 +674,13 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
                }
        }
 
+       /* Setup scatter mode if needed by jumbo */
+       otx2_nix_enable_mseg_on_jumbo(rxq);
+
        return 0;
 
 free_rxq:
-       otx2_nix_rx_queue_release(rxq);
+       otx2_nix_rx_queue_release(eth_dev, rq);
 fail:
        return rc;
 }
@@ -603,7 +692,7 @@ nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
         * Maximum three segments can be supported with W8; choose
         * NIX_MAXSQESZ_W16 for multi-segment offload.
         */
-       if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+       if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
                return NIX_MAXSQESZ_W16;
        else
                return NIX_MAXSQESZ_W8;
@@ -618,29 +707,29 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
        struct rte_eth_rxmode *rxmode = &conf->rxmode;
        uint16_t flags = 0;
 
-       if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
-                       (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+       if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
+                       (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
                flags |= NIX_RX_OFFLOAD_RSS_F;
 
-       if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
-                        DEV_RX_OFFLOAD_UDP_CKSUM))
+       if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+                        RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
                flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-       if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
-                               DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+       if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+                               RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
                flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
 
-       if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+       if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
                flags |= NIX_RX_MULTI_SEG_F;
 
-       if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
-                               DEV_RX_OFFLOAD_QINQ_STRIP))
+       if (dev->rx_offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+                               RTE_ETH_RX_OFFLOAD_QINQ_STRIP))
                flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
 
-       if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+       if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
                flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
-       if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+       if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
                flags |= NIX_RX_OFFLOAD_SECURITY_F;
 
        if (!dev->ptype_disable)
@@ -657,15 +746,15 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
        uint16_t flags = 0;
 
        /* Fastpath is dependent on these enums */
-       RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
-       RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
-       RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
-       RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
-       RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
-       RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
-       RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
-       RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
-       RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_TCP_CKSUM != (1ULL << 52));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_SCTP_CKSUM != (2ULL << 52));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_UDP_CKSUM != (3ULL << 52));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IP_CKSUM != (1ULL << 54));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IPV4 != (1ULL << 55));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IP_CKSUM != (1ULL << 58));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV4 != (1ULL << 59));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV6 != (1ULL << 60));
+       RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_UDP_CKSUM != (1ULL << 41));
        RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
        RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
        RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
@@ -679,48 +768,163 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
                         offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
 
-       if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
-           conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+       if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+           conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
                flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
 
-       if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
-           conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+       if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+           conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
                flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 
-       if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
-           conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
-           conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
-           conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+       if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+           conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+           conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+           conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
                flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
 
-       if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+       if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
                flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
 
-       if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+       if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
                flags |= NIX_TX_MULTI_SEG_F;
 
        /* Enable Inner checksum for TSO */
-       if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+       if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
                flags |= (NIX_TX_OFFLOAD_TSO_F |
                          NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
        /* Enable Inner and Outer checksum for Tunnel TSO */
-       if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-                   DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-                   DEV_TX_OFFLOAD_GRE_TNL_TSO))
+       if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+                   RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+                   RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
                flags |= (NIX_TX_OFFLOAD_TSO_F |
                          NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
                          NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 
-       if (conf & DEV_TX_OFFLOAD_SECURITY)
+       if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
                flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
-       if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+       if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
                flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
        return flags;
 }
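
The RTE_BUILD_BUG_ON checks at the top of this function pin the mbuf flag
bit positions the Tx fast path depends on. A hedged illustration of what
that layout allows, assuming the asserted positions; the helper name is
illustrative and is not the driver's actual fast-path code:

    static inline uint8_t
    nix_l4_csum_type(uint64_t ol_flags)
    {
            /* Bits 52-53 encode the L4 checksum type directly:
             * 1 = TCP, 2 = SCTP, 3 = UDP.
             */
            return (ol_flags >> 52) & 0x3;
    }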
 
+static int
+nix_sqb_lock(struct rte_mempool *mp)
+{
+       struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
+       struct npa_aq_enq_req *req;
+       int rc;
+
+       req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+       req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+       req->ctype = NPA_AQ_CTYPE_AURA;
+       req->op = NPA_AQ_INSTOP_LOCK;
+
+       req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+       if (!req) {
+               /* The shared memory buffer can be full.
+                * Flush it and retry
+                */
+               otx2_mbox_msg_send(npa_lf->mbox, 0);
+               rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0);
+               if (rc < 0) {
+                       otx2_err("Failed to LOCK AURA context");
+                       return rc;
+               }
+
+               req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+               if (!req) {
+                       otx2_err("Failed to LOCK POOL context");
+                       return -ENOMEM;
+               }
+       }
+
+       req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+       req->ctype = NPA_AQ_CTYPE_POOL;
+       req->op = NPA_AQ_INSTOP_LOCK;
+
+       rc = otx2_mbox_process(npa_lf->mbox);
+       if (rc < 0) {
+               otx2_err("Unable to lock POOL in NDC");
+               return rc;
+       }
+
+       return 0;
+}
+
+static int
+nix_sqb_unlock(struct rte_mempool *mp)
+{
+       struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
+       struct npa_aq_enq_req *req;
+       int rc;
+
+       req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+       req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+       req->ctype = NPA_AQ_CTYPE_AURA;
+       req->op = NPA_AQ_INSTOP_UNLOCK;
+
+       req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+       if (!req) {
+               /* The shared memory buffer can be full.
+                * Flush it and retry
+                */
+               otx2_mbox_msg_send(npa_lf->mbox, 0);
+               rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0);
+               if (rc < 0) {
+                       otx2_err("Failed to UNLOCK AURA context");
+                       return rc;
+               }
+
+               req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+               if (!req) {
+                       otx2_err("Failed to UNLOCK POOL context");
+                       return -ENOMEM;
+               }
+       }
+       req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+       req->ctype = NPA_AQ_CTYPE_POOL;
+       req->op = NPA_AQ_INSTOP_UNLOCK;
+
+       rc = otx2_mbox_process(npa_lf->mbox);
+       if (rc < 0) {
+               otx2_err("Unable to UNLOCK AURA in NDC");
+               return rc;
+       }
+
+       return 0;
+}
+
+void
+otx2_nix_enable_mseg_on_jumbo(struct otx2_eth_rxq *rxq)
+{
+       struct rte_pktmbuf_pool_private *mbp_priv;
+       struct rte_eth_dev *eth_dev;
+       struct otx2_eth_dev *dev;
+       uint32_t buffsz;
+
+       eth_dev = rxq->eth_dev;
+       dev = otx2_eth_pmd_priv(eth_dev);
+
+       /* Get rx buffer size */
+       mbp_priv = rte_mempool_get_priv(rxq->pool);
+       buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
+       if (eth_dev->data->mtu + (uint32_t)NIX_L2_OVERHEAD > buffsz) {
+               dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+               dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+
+               /* Setting up the rx[tx]_offload_flags due to change
+                * in rx[tx]_offloads.
+                */
+               dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
+               dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
+       }
+}
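
A worked example of the check above, assuming the default 128-byte
RTE_PKTMBUF_HEADROOM: a mempool with a 2176-byte data room leaves
buffsz = 2048, so a 9000-byte MTU plus NIX_L2_OVERHEAD overflows a single
buffer, and the driver enables Rx scatter and Tx multi-seg, refreshing the
fast-path flag words to match.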
+
 static int
 nix_sq_init(struct otx2_eth_txq *txq)
 {
@@ -763,7 +967,20 @@ nix_sq_init(struct otx2_eth_txq *txq)
        /* Many to one reduction */
        sq->sq.qint_idx = txq->sq % dev->qints;
 
-       return otx2_mbox_process(mbox);
+       rc = otx2_mbox_process(mbox);
+       if (rc < 0)
+               return rc;
+
+       if (dev->lock_tx_ctx) {
+               sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+               sq->qidx = txq->sq;
+               sq->ctype = NIX_AQ_CTYPE_SQ;
+               sq->op = NIX_AQ_INSTOP_LOCK;
+
+               rc = otx2_mbox_process(mbox);
+       }
+
+       return rc;
 }
 
 static int
@@ -806,6 +1023,20 @@ nix_sq_uninit(struct otx2_eth_txq *txq)
        if (rc)
                return rc;
 
+       if (dev->lock_tx_ctx) {
+               /* Unlock sq */
+               aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+               aq->qidx = txq->sq;
+               aq->ctype = NIX_AQ_CTYPE_SQ;
+               aq->op = NIX_AQ_INSTOP_UNLOCK;
+
+               rc = otx2_mbox_process(mbox);
+               if (rc < 0)
+                       return rc;
+
+               nix_sqb_unlock(txq->sqb_pool);
+       }
+
        /* Read SQ and free sqb's */
        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = txq->sq;
@@ -893,7 +1124,7 @@ nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
 
        txq->sqb_pool = rte_mempool_create_empty(name, NIX_MAX_SQB, blk_sz,
                                                 0, 0, dev->node,
-                                                MEMPOOL_F_NO_SPREAD);
+                                                RTE_MEMPOOL_F_NO_SPREAD);
        txq->nb_sqb_bufs = nb_sqb_bufs;
        txq->sqes_per_sqb_log2 = (uint16_t)rte_log2_u32(sqes_per_sqb);
        txq->nb_sqb_bufs_adj = nb_sqb_bufs -
@@ -919,7 +1150,7 @@ nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
                goto fail;
        }
 
-       tmp = rte_mempool_calc_obj_size(blk_sz, MEMPOOL_F_NO_SPREAD, &sz);
+       tmp = rte_mempool_calc_obj_size(blk_sz, RTE_MEMPOOL_F_NO_SPREAD, &sz);
        if (dev->sqb_size != sz.elt_size) {
                otx2_err("sqe pool block size is not expected %d != %d",
                         dev->sqb_size, tmp);
@@ -927,6 +1158,8 @@ nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
        }
 
        nix_sqb_aura_limit_cfg(txq->sqb_pool, txq->nb_sqb_bufs);
+       if (dev->lock_tx_ctx)
+               nix_sqb_lock(txq->sqb_pool);
 
        return 0;
 fail:
@@ -985,16 +1218,13 @@ otx2_nix_form_default_desc(struct otx2_eth_txq *txq)
 }
 
 static void
-otx2_nix_tx_queue_release(void *_txq)
+otx2_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 {
-       struct otx2_eth_txq *txq = _txq;
-       struct rte_eth_dev *eth_dev;
+       struct otx2_eth_txq *txq = eth_dev->data->tx_queues[qid];
 
        if (!txq)
                return;
 
-       eth_dev = txq->dev->eth_dev;
-
        otx2_nix_dbg("Releasing txq %u", txq->sq);
 
        /* Flush and disable tm */
@@ -1009,6 +1239,7 @@ otx2_nix_tx_queue_release(void *_txq)
        }
        otx2_nix_sq_flush_post(txq);
        rte_free(txq);
+       eth_dev->data->tx_queues[qid] = NULL;
 }
 
 
@@ -1036,8 +1267,7 @@ otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
        /* Free memory prior to re-allocation if needed. */
        if (eth_dev->data->tx_queues[sq] != NULL) {
                otx2_nix_dbg("Freeing memory prior to re-allocation %d", sq);
-               otx2_nix_tx_queue_release(eth_dev->data->tx_queues[sq]);
-               eth_dev->data->tx_queues[sq] = NULL;
+               otx2_nix_tx_queue_release(eth_dev, sq);
        }
 
        /* Find the expected offloads for this queue */
@@ -1056,6 +1286,7 @@ otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
        txq->sqb_pool = NULL;
        txq->offloads = offloads;
        dev->tx_offloads |= offloads;
+       eth_dev->data->tx_queues[sq] = txq;
 
        /*
         * Allocate memory for flow control updates from HW.
@@ -1095,18 +1326,18 @@ otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
        txq->qconf.nb_desc = nb_desc;
        memcpy(&txq->qconf.conf.tx, tx_conf, sizeof(struct rte_eth_txconf));
 
+       txq->lso_tun_fmt = dev->lso_tun_fmt;
        otx2_nix_form_default_desc(txq);
 
        otx2_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " sqb=0x%" PRIx64 ""
                     " lmt_addr=%p nb_sqb_bufs=%d sqes_per_sqb_log2=%d", sq,
                     fc->addr, offloads, txq->sqb_pool->pool_id, txq->lmt_addr,
                     txq->nb_sqb_bufs, txq->sqes_per_sqb_log2);
-       eth_dev->data->tx_queues[sq] = txq;
        eth_dev->data->tx_queue_state[sq] = RTE_ETH_QUEUE_STATE_STOPPED;
        return 0;
 
 free_txq:
-       otx2_nix_tx_queue_release(txq);
+       otx2_nix_tx_queue_release(eth_dev, sq);
 fail:
        return rc;
 }
@@ -1139,23 +1370,25 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
        txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
        for (i = 0; i < nb_txq; i++) {
                if (txq[i] == NULL) {
-                       otx2_err("txq[%d] is already released", i);
-                       goto fail;
+                       tx_qconf[i].valid = false;
+                       otx2_info("txq[%d] is already released", i);
+                       continue;
                }
                memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));
-               otx2_nix_tx_queue_release(txq[i]);
-               eth_dev->data->tx_queues[i] = NULL;
+               tx_qconf[i].valid = true;
+               otx2_nix_tx_queue_release(eth_dev, i);
        }
 
        rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
        for (i = 0; i < nb_rxq; i++) {
                if (rxq[i] == NULL) {
-                       otx2_err("rxq[%d] is already released", i);
-                       goto fail;
+                       rx_qconf[i].valid = false;
+                       otx2_info("rxq[%d] is already released", i);
+                       continue;
                }
                memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));
-               otx2_nix_rx_queue_release(rxq[i]);
-               eth_dev->data->rx_queues[i] = NULL;
+               rx_qconf[i].valid = true;
+               otx2_nix_rx_queue_release(eth_dev, i);
        }
 
        dev->tx_qconf = tx_qconf;
@@ -1163,10 +1396,8 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
        return 0;
 
 fail:
-       if (tx_qconf)
-               free(tx_qconf);
-       if (rx_qconf)
-               free(rx_qconf);
+       free(tx_qconf);
+       free(rx_qconf);
 
        return -ENOMEM;
 }
@@ -1177,8 +1408,6 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct otx2_eth_qconf *tx_qconf = dev->tx_qconf;
        struct otx2_eth_qconf *rx_qconf = dev->rx_qconf;
-       struct otx2_eth_txq **txq;
-       struct otx2_eth_rxq **rxq;
        int rc, i, nb_rxq, nb_txq;
 
        nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
@@ -1208,14 +1437,15 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
         * queues are already setup in port_configure().
         */
        for (i = 0; i < nb_txq; i++) {
+               if (!tx_qconf[i].valid)
+                       continue;
                rc = otx2_nix_tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc,
                                             tx_qconf[i].socket_id,
                                             &tx_qconf[i].conf.tx);
                if (rc) {
                        otx2_err("Failed to setup tx queue rc=%d", rc);
-                       txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
                        for (i -= 1; i >= 0; i--)
-                               otx2_nix_tx_queue_release(txq[i]);
+                               otx2_nix_tx_queue_release(eth_dev, i);
                        goto fail;
                }
        }
@@ -1223,15 +1453,16 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
        free(tx_qconf); tx_qconf = NULL;
 
        for (i = 0; i < nb_rxq; i++) {
+               if (!rx_qconf[i].valid)
+                       continue;
                rc = otx2_nix_rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc,
                                             rx_qconf[i].socket_id,
                                             &rx_qconf[i].conf.rx,
                                             rx_qconf[i].mempool);
                if (rc) {
                        otx2_err("Failed to setup rx queue rc=%d", rc);
-                       rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
                        for (i -= 1; i >= 0; i--)
-                               otx2_nix_rx_queue_release(rxq[i]);
+                               otx2_nix_rx_queue_release(eth_dev, i);
                        goto release_tx_queues;
                }
        }
@@ -1241,9 +1472,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
        return 0;
 
 release_tx_queues:
-       txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
-               otx2_nix_tx_queue_release(txq[i]);
+               otx2_nix_tx_queue_release(eth_dev, i);
 fail:
        if (tx_qconf)
                free(tx_qconf);
@@ -1439,7 +1669,7 @@ nix_setup_lso_formats(struct otx2_eth_dev *dev)
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_lso_format_cfg_rsp *rsp;
        struct nix_lso_format_cfg *req;
-       uint8_t base;
+       uint8_t *fmt;
        int rc;
 
        /* Skip if TSO was not requested */
@@ -1454,11 +1684,9 @@ nix_setup_lso_formats(struct otx2_eth_dev *dev)
        if (rc)
                return rc;
 
-       base = rsp->lso_format_idx;
-       if (base != NIX_LSO_FORMAT_IDX_TSOV4)
+       if (rsp->lso_format_idx != NIX_LSO_FORMAT_IDX_TSOV4)
                return -EFAULT;
-       dev->lso_base_idx = base;
-       otx2_nix_dbg("tcpv4 lso fmt=%u", base);
+       otx2_nix_dbg("tcpv4 lso fmt=%u", rsp->lso_format_idx);
 
 
        /*
@@ -1470,9 +1698,9 @@ nix_setup_lso_formats(struct otx2_eth_dev *dev)
        if (rc)
                return rc;
 
-       if (rsp->lso_format_idx != base + 1)
+       if (rsp->lso_format_idx != NIX_LSO_FORMAT_IDX_TSOV6)
                return -EFAULT;
-       otx2_nix_dbg("tcpv6 lso fmt=%u\n", base + 1);
+       otx2_nix_dbg("tcpv6 lso fmt=%u\n", rsp->lso_format_idx);
 
        /*
         * IPv4/UDP/TUN HDR/IPv4/TCP LSO
@@ -1483,9 +1711,8 @@ nix_setup_lso_formats(struct otx2_eth_dev *dev)
        if (rc)
                return rc;
 
-       if (rsp->lso_format_idx != base + 2)
-               return -EFAULT;
-       otx2_nix_dbg("udp tun v4v4 fmt=%u\n", base + 2);
+       dev->lso_udp_tun_idx[NIX_LSO_TUN_V4V4] = rsp->lso_format_idx;
+       otx2_nix_dbg("udp tun v4v4 fmt=%u\n", rsp->lso_format_idx);
 
        /*
         * IPv4/UDP/TUN HDR/IPv6/TCP LSO
@@ -1496,9 +1723,8 @@ nix_setup_lso_formats(struct otx2_eth_dev *dev)
        if (rc)
                return rc;
 
-       if (rsp->lso_format_idx != base + 3)
-               return -EFAULT;
-       otx2_nix_dbg("udp tun v4v6 fmt=%u\n", base + 3);
+       dev->lso_udp_tun_idx[NIX_LSO_TUN_V4V6] = rsp->lso_format_idx;
+       otx2_nix_dbg("udp tun v4v6 fmt=%u\n", rsp->lso_format_idx);
 
        /*
         * IPv6/UDP/TUN HDR/IPv4/TCP LSO
@@ -1509,9 +1735,8 @@ nix_setup_lso_formats(struct otx2_eth_dev *dev)
        if (rc)
                return rc;
 
-       if (rsp->lso_format_idx != base + 4)
-               return -EFAULT;
-       otx2_nix_dbg("udp tun v6v4 fmt=%u\n", base + 4);
+       dev->lso_udp_tun_idx[NIX_LSO_TUN_V6V4] = rsp->lso_format_idx;
+       otx2_nix_dbg("udp tun v6v4 fmt=%u\n", rsp->lso_format_idx);
 
        /*
         * IPv6/UDP/TUN HDR/IPv6/TCP LSO
@@ -1521,9 +1746,9 @@ nix_setup_lso_formats(struct otx2_eth_dev *dev)
        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
        if (rc)
                return rc;
-       if (rsp->lso_format_idx != base + 5)
-               return -EFAULT;
-       otx2_nix_dbg("udp tun v6v6 fmt=%u\n", base + 5);
+
+       dev->lso_udp_tun_idx[NIX_LSO_TUN_V6V6] = rsp->lso_format_idx;
+       otx2_nix_dbg("udp tun v6v6 fmt=%u\n", rsp->lso_format_idx);
 
        /*
         * IPv4/TUN HDR/IPv4/TCP LSO
@@ -1534,9 +1759,8 @@ nix_setup_lso_formats(struct otx2_eth_dev *dev)
        if (rc)
                return rc;
 
-       if (rsp->lso_format_idx != base + 6)
-               return -EFAULT;
-       otx2_nix_dbg("tun v4v4 fmt=%u\n", base + 6);
+       dev->lso_tun_idx[NIX_LSO_TUN_V4V4] = rsp->lso_format_idx;
+       otx2_nix_dbg("tun v4v4 fmt=%u\n", rsp->lso_format_idx);
 
        /*
         * IPv4/TUN HDR/IPv6/TCP LSO
@@ -1547,9 +1771,8 @@ nix_setup_lso_formats(struct otx2_eth_dev *dev)
        if (rc)
                return rc;
 
-       if (rsp->lso_format_idx != base + 7)
-               return -EFAULT;
-       otx2_nix_dbg("tun v4v6 fmt=%u\n", base + 7);
+       dev->lso_tun_idx[NIX_LSO_TUN_V4V6] = rsp->lso_format_idx;
+       otx2_nix_dbg("tun v4v6 fmt=%u\n", rsp->lso_format_idx);
 
        /*
         * IPv6/TUN HDR/IPv4/TCP LSO
@@ -1560,9 +1783,8 @@ nix_setup_lso_formats(struct otx2_eth_dev *dev)
        if (rc)
                return rc;
 
-       if (rsp->lso_format_idx != base + 8)
-               return -EFAULT;
-       otx2_nix_dbg("tun v6v4 fmt=%u\n", base + 8);
+       dev->lso_tun_idx[NIX_LSO_TUN_V6V4] = rsp->lso_format_idx;
+       otx2_nix_dbg("tun v6v4 fmt=%u\n", rsp->lso_format_idx);
 
        /*
         * IPv6/TUN HDR/IPv6/TCP LSO
@@ -1572,9 +1794,26 @@ nix_setup_lso_formats(struct otx2_eth_dev *dev)
        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
        if (rc)
                return rc;
-       if (rsp->lso_format_idx != base + 9)
-               return -EFAULT;
-       otx2_nix_dbg("tun v6v6 fmt=%u\n", base + 9);
+
+       dev->lso_tun_idx[NIX_LSO_TUN_V6V6] = rsp->lso_format_idx;
+       otx2_nix_dbg("tun v6v6 fmt=%u\n", rsp->lso_format_idx);
+
+       /* Save all tunnel formats into one u64 for the fast path.
+        * The lower 32 bits hold the non-UDP tunnel formats and the
+        * upper 32 bits hold the UDP tunnel formats.
+        */
+       fmt = dev->lso_tun_idx;
+       dev->lso_tun_fmt = ((uint64_t)fmt[NIX_LSO_TUN_V4V4] |
+                           (uint64_t)fmt[NIX_LSO_TUN_V4V6] << 8 |
+                           (uint64_t)fmt[NIX_LSO_TUN_V6V4] << 16 |
+                           (uint64_t)fmt[NIX_LSO_TUN_V6V6] << 24);
+
+       fmt = dev->lso_udp_tun_idx;
+       dev->lso_tun_fmt |= ((uint64_t)fmt[NIX_LSO_TUN_V4V4] << 32 |
+                            (uint64_t)fmt[NIX_LSO_TUN_V4V6] << 40 |
+                            (uint64_t)fmt[NIX_LSO_TUN_V6V4] << 48 |
+                            (uint64_t)fmt[NIX_LSO_TUN_V6V6] << 56);
+
        return 0;
 }
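
The packed u64 built above lets the Tx fast path pick an LSO format index
with a shift instead of a table lookup. A hedged decode sketch, assuming
NIX_LSO_TUN_V4V4..NIX_LSO_TUN_V6V6 are 0..3 as the 8-bit shifts imply; the
helper name is illustrative:

    static inline uint8_t
    nix_lso_fmt_idx(uint64_t lso_tun_fmt, bool udp_tun, uint8_t tun)
    {
            /* Non-UDP tunnel formats sit in the low 32 bits, UDP tunnel
             * formats in the high 32 bits; one byte per tunnel type.
             */
            return (lso_tun_fmt >> ((udp_tun ? 32 : 0) + tun * 8)) & 0xff;
    }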
 
@@ -1609,21 +1848,21 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
                goto fail_configure;
        }
 
-       if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-           rxmode->mq_mode != ETH_MQ_RX_RSS) {
+       if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+           rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
                otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
                goto fail_configure;
        }
 
-       if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+       if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
                otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
                goto fail_configure;
        }
 
        if (otx2_dev_is_Ax(dev) &&
-           (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
-           ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
-           (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+           (txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
+           ((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+           (txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
                otx2_err("Outer IP and SCTP checksum unsupported");
                goto fail_configure;
        }
@@ -1917,12 +2156,13 @@ done:
        return rc;
 }
 
-static void
+static int
 otx2_nix_dev_stop(struct rte_eth_dev *eth_dev)
 {
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct rte_mbuf *rx_pkts[32];
        struct otx2_eth_rxq *rxq;
+       struct rte_eth_link link;
        int count, i, j, rc;
 
        nix_lf_switch_header_type_enable(dev, false);
@@ -1947,6 +2187,12 @@ otx2_nix_dev_stop(struct rte_eth_dev *eth_dev)
        /* Stop tx queues  */
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                otx2_nix_tx_queue_stop(eth_dev, i);
+
+       /* Bring down link status internally */
+       memset(&link, 0, sizeof(link));
+       rte_eth_linkstatus_set(eth_dev, &link);
+
+       return 0;
 }
 
 static int
@@ -1989,7 +2235,7 @@ otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
         * enabled in PF owning this VF
         */
        memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
-       if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+       if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
            otx2_ethdev_is_ptp_en(dev))
                otx2_nix_timesync_enable(eth_dev);
        else
@@ -2001,6 +2247,16 @@ otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
        if (otx2_ethdev_is_ptp_en(dev) && otx2_dev_is_vf(dev))
                otx2_nix_ptp_enable_vf(eth_dev);
 
+       if (dev->rx_offload_flags & NIX_RX_OFFLOAD_TSTAMP_F) {
+               rc = rte_mbuf_dyn_rx_timestamp_register(
+                               &dev->tstamp.tstamp_dynfield_offset,
+                               &dev->tstamp.rx_tstamp_dynflag);
+               if (rc != 0) {
+                       otx2_err("Failed to register Rx timestamp field/flag");
+                       return -rte_errno;
+               }
+       }
+
        rc = npc_rx_enable(dev);
        if (rc) {
                otx2_err("Failed to enable NPC rx %d", rc);
@@ -2028,7 +2284,7 @@ rx_disable:
 }
 
 static int otx2_nix_dev_reset(struct rte_eth_dev *eth_dev);
-static void otx2_nix_dev_close(struct rte_eth_dev *eth_dev);
+static int otx2_nix_dev_close(struct rte_eth_dev *eth_dev);
 
 /* Initialize and register driver with DPDK Application */
 static const struct eth_dev_ops otx2_eth_dev_ops = {
@@ -2037,6 +2293,7 @@ static const struct eth_dev_ops otx2_eth_dev_ops = {
        .link_update              = otx2_nix_link_update,
        .tx_queue_setup           = otx2_nix_tx_queue_setup,
        .tx_queue_release         = otx2_nix_tx_queue_release,
+       .tm_ops_get               = otx2_nix_tm_ops_get,
        .rx_queue_setup           = otx2_nix_rx_queue_setup,
        .rx_queue_release         = otx2_nix_rx_queue_release,
        .dev_start                = otx2_nix_dev_start,
@@ -2077,13 +2334,10 @@ static const struct eth_dev_ops otx2_eth_dev_ops = {
        .txq_info_get             = otx2_nix_txq_info_get,
        .rx_burst_mode_get        = otx2_rx_burst_mode_get,
        .tx_burst_mode_get        = otx2_tx_burst_mode_get,
-       .rx_queue_count           = otx2_nix_rx_queue_count,
-       .rx_descriptor_done       = otx2_nix_rx_descriptor_done,
-       .rx_descriptor_status     = otx2_nix_rx_descriptor_status,
-       .tx_descriptor_status     = otx2_nix_tx_descriptor_status,
        .tx_done_cleanup          = otx2_nix_tx_done_cleanup,
+       .set_queue_rate_limit     = otx2_nix_tm_set_queue_rate_limit,
        .pool_ops_supported       = otx2_nix_pool_ops_supported,
-       .filter_ctrl              = otx2_nix_dev_filter_ctrl,
+       .flow_ops_get             = otx2_nix_dev_flow_ops_get,
        .get_module_info          = otx2_nix_get_module_info,
        .get_module_eeprom        = otx2_nix_get_module_eeprom,
        .fw_version_get           = otx2_nix_fw_version_get,
@@ -2164,6 +2418,20 @@ otx2_eth_dev_is_sdp(struct rte_pci_device *pci_dev)
        return false;
 }
 
+static inline uint64_t
+nix_get_blkaddr(struct otx2_eth_dev *dev)
+{
+       uint64_t reg;
+
+       /* Read the discovery register to find which NIX block the LF
+        * is attached to.
+        */
+       reg = otx2_read64(dev->bar2 +
+                         RVU_PF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_NIX0));
+
+       return reg & 0x1FFULL ? RVU_BLOCK_ADDR_NIX0 : RVU_BLOCK_ADDR_NIX1;
+}
+
 static int
 otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
 {
@@ -2172,6 +2440,9 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
        int rc, max_entries;
 
        eth_dev->dev_ops = &otx2_eth_dev_ops;
+       eth_dev->rx_queue_count = otx2_nix_rx_queue_count;
+       eth_dev->rx_descriptor_status = otx2_nix_rx_descriptor_status;
+       eth_dev->tx_descriptor_status = otx2_nix_tx_descriptor_status;
 
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
@@ -2184,7 +2455,7 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /* Zero out everything after OTX2_DEV to allow proper dev_reset() */
        memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) -
@@ -2223,7 +2494,6 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
        dev->configured = 0;
        dev->drv_inited = true;
        dev->ptype_disable = 0;
-       dev->base = dev->bar2 + (RVU_BLOCK_ADDR_NIX0 << 20);
        dev->lmt_addr = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);
 
        /* Attach NIX LF */
@@ -2231,6 +2501,8 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
        if (rc)
                goto otx2_npa_uninit;
 
+       dev->base = dev->bar2 + (nix_get_blkaddr(dev) << 20);
+
        /* Get NIX MSIX offset */
        rc = nix_lf_get_msix_offset(dev);
        if (rc)
@@ -2291,8 +2563,8 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
        rc = otx2_eth_sec_ctx_create(eth_dev);
        if (rc)
                goto free_mac_addrs;
-       dev->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-       dev->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+       dev->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
+       dev->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
 
        /* Initialize rte-flow */
        rc = otx2_flow_init(dev);
@@ -2359,18 +2631,19 @@ otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
 
        nix_cgx_stop_link_event(dev);
 
+       /* Unregister the dev ops; this is required to stop VFs from
+        * receiving link status updates on the exit path.
+        */
+       dev->ops = NULL;
+
        /* Free up SQs */
-       for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
-               otx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]);
-               eth_dev->data->tx_queues[i] = NULL;
-       }
+       for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+               otx2_nix_tx_queue_release(eth_dev, i);
        eth_dev->data->nb_tx_queues = 0;
 
        /* Free up RQ's and CQ's */
-       for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
-               otx2_nix_rx_queue_release(eth_dev->data->rx_queues[i]);
-               eth_dev->data->rx_queues[i] = NULL;
-       }
+       for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+               otx2_nix_rx_queue_release(eth_dev, i);
        eth_dev->data->nb_rx_queues = 0;
 
        /* Free tm resources */
@@ -2424,10 +2697,11 @@ otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
        return 0;
 }
 
-static void
+static int
 otx2_nix_dev_close(struct rte_eth_dev *eth_dev)
 {
        otx2_eth_dev_uninit(eth_dev, true);
+       return 0;
 }
 
 static int
@@ -2457,7 +2731,7 @@ nix_remove(struct rte_pci_device *pci_dev)
                if (rc)
                        return rc;
 
-               rte_eth_dev_pci_release(eth_dev);
+               rte_eth_dev_release_port(eth_dev);
        }
 
        /* Nothing to be done for secondary processes */
@@ -2535,6 +2809,6 @@ static struct rte_pci_driver pci_nix = {
        .remove = nix_remove,
 };
 
-RTE_PMD_REGISTER_PCI(net_octeontx2, pci_nix);
-RTE_PMD_REGISTER_PCI_TABLE(net_octeontx2, pci_nix_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_octeontx2, "vfio-pci");
+RTE_PMD_REGISTER_PCI(OCTEONTX2_PMD, pci_nix);
+RTE_PMD_REGISTER_PCI_TABLE(OCTEONTX2_PMD, pci_nix_map);
+RTE_PMD_REGISTER_KMOD_DEP(OCTEONTX2_PMD, "vfio-pci");