net/octeontx2: support base PTP
[dpdk.git] / drivers / net / octeontx2 / otx2_ethdev.c
index 826ce7f..6ab8ed7 100644
@@ -2,9 +2,15 @@
  * Copyright(C) 2019 Marvell International Ltd.
  */
 
+#include <inttypes.h>
+#include <math.h>
+
 #include <rte_ethdev_pci.h>
 #include <rte_io.h>
 #include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_mbuf_pool_ops.h>
+#include <rte_mempool.h>
 
 #include "otx2_ethdev.h"
 
@@ -114,6 +120,932 @@ nix_lf_free(struct otx2_eth_dev *dev)
        return otx2_mbox_process(mbox);
 }
 
+int
+otx2_cgx_rxtx_start(struct otx2_eth_dev *dev)
+{
+       struct otx2_mbox *mbox = dev->mbox;
+
+       if (otx2_dev_is_vf(dev))
+               return 0;
+
+       otx2_mbox_alloc_msg_cgx_start_rxtx(mbox);
+
+       return otx2_mbox_process(mbox);
+}
+
+int
+otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev)
+{
+       struct otx2_mbox *mbox = dev->mbox;
+
+       if (otx2_dev_is_vf(dev))
+               return 0;
+
+       otx2_mbox_alloc_msg_cgx_stop_rxtx(mbox);
+
+       return otx2_mbox_process(mbox);
+}
+
+static inline void
+nix_rx_queue_reset(struct otx2_eth_rxq *rxq)
+{
+       rxq->head = 0;
+       rxq->available = 0;
+}
+
+static inline uint32_t
+nix_qsize_to_val(enum nix_q_size_e qsize)
+{
+       return (16UL << (qsize * 2));
+}
+
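+/*
+ * nix_qsize_to_val() maps nix_q_size_e to a queue depth of 16 << (2 * qsize)
+ * entries, so each enum step quadruples the depth (16, 64, 256, ...).  Clamp
+ * the requested descriptor count up to the smallest depth that can hold it,
+ * starting from 4K entries when the min-4K-queue fixup applies.
+ */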
+static inline enum nix_q_size_e
+nix_qsize_clampup_get(struct otx2_eth_dev *dev, uint32_t val)
+{
+       int i;
+
+       if (otx2_ethdev_fixup_is_min_4k_q(dev))
+               i = nix_q_size_4K;
+       else
+               i = nix_q_size_16;
+
+       for (; i < nix_q_size_max; i++)
+               if (val <= nix_qsize_to_val(i))
+                       break;
+
+       if (i >= nix_q_size_max)
+               i = nix_q_size_max - 1;
+
+       return i;
+}
+
+static int
+nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
+              uint16_t qid, struct otx2_eth_rxq *rxq, struct rte_mempool *mp)
+{
+       struct otx2_mbox *mbox = dev->mbox;
+       const struct rte_memzone *rz;
+       uint32_t ring_size, cq_size;
+       struct nix_aq_enq_req *aq;
+       uint16_t first_skip;
+       int rc;
+
+       cq_size = rxq->qlen;
+       ring_size = cq_size * NIX_CQ_ENTRY_SZ;
+       rz = rte_eth_dma_zone_reserve(eth_dev, "cq", qid, ring_size,
+                                     NIX_CQ_ALIGN, dev->node);
+       if (rz == NULL) {
+               otx2_err("Failed to allocate mem for cq hw ring");
+               rc = -ENOMEM;
+               goto fail;
+       }
+       memset(rz->addr, 0, rz->len);
+       rxq->desc = (uintptr_t)rz->addr;
+       rxq->qmask = cq_size - 1;
+
+       aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+       aq->qidx = qid;
+       aq->ctype = NIX_AQ_CTYPE_CQ;
+       aq->op = NIX_AQ_INSTOP_INIT;
+
+       aq->cq.ena = 1;
+       aq->cq.caching = 1;
+       aq->cq.qsize = rxq->qsize;
+       aq->cq.base = rz->iova;
+       aq->cq.avg_level = 0xff;
+       aq->cq.cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
+       aq->cq.cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
+
+       /* TX pause frames enable flowctrl on RX side */
+       if (dev->fc_info.tx_pause) {
+               /* Single bpid is allocated for all rx channels for now */
+               aq->cq.bpid = dev->fc_info.bpid[0];
+               aq->cq.bp = NIX_CQ_BP_LEVEL;
+               aq->cq.bp_ena = 1;
+       }
+
+       /* Many-to-one reduction */
+       aq->cq.qint_idx = qid % dev->qints;
+
+       if (otx2_ethdev_fixup_is_limit_cq_full(dev)) {
+               uint16_t min_rx_drop;
+               const float rx_cq_skid = 1024 * 256;
+
+               min_rx_drop = ceil(rx_cq_skid / (float)cq_size);
+               aq->cq.drop = min_rx_drop;
+               aq->cq.drop_ena = 1;
+       }
+
+       rc = otx2_mbox_process(mbox);
+       if (rc) {
+               otx2_err("Failed to init cq context");
+               goto fail;
+       }
+
+       aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+       aq->qidx = qid;
+       aq->ctype = NIX_AQ_CTYPE_RQ;
+       aq->op = NIX_AQ_INSTOP_INIT;
+
+       aq->rq.sso_ena = 0;
+       aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
+       aq->rq.spb_ena = 0;
+       aq->rq.lpb_aura = npa_lf_aura_handle_to_aura(mp->pool_id);
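+       /*
+        * first_skip is the number of bytes the NIX skips at the start of
+        * the mbuf buffer before placing packet data: the mbuf header, the
+        * private area and the headroom.  The same value is used as the mbuf
+        * data offset, while the RQ context takes it in 8-byte words.
+        */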
+       first_skip = (sizeof(struct rte_mbuf));
+       first_skip += RTE_PKTMBUF_HEADROOM;
+       first_skip += rte_pktmbuf_priv_size(mp);
+       rxq->data_off = first_skip;
+
+       first_skip /= 8; /* Expressed in number of dwords */
+       aq->rq.first_skip = first_skip;
+       aq->rq.later_skip = (sizeof(struct rte_mbuf) / 8);
+       aq->rq.flow_tagw = 32; /* 32-bits */
+       aq->rq.lpb_sizem1 = rte_pktmbuf_data_room_size(mp);
+       aq->rq.lpb_sizem1 += rte_pktmbuf_priv_size(mp);
+       aq->rq.lpb_sizem1 += sizeof(struct rte_mbuf);
+       aq->rq.lpb_sizem1 /= 8;
+       aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
+       aq->rq.ena = 1;
+       aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+       aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
+       aq->rq.rq_int_ena = 0;
+       /* Many-to-one reduction */
+       aq->rq.qint_idx = qid % dev->qints;
+
+       if (otx2_ethdev_fixup_is_limit_cq_full(dev))
+               aq->rq.xqe_drop_ena = 1;
+
+       rc = otx2_mbox_process(mbox);
+       if (rc) {
+               otx2_err("Failed to init rq context");
+               goto fail;
+       }
+
+       return 0;
+fail:
+       return rc;
+}
+
+static int
+nix_rq_enb_dis(struct rte_eth_dev *eth_dev,
+              struct otx2_eth_rxq *rxq, const bool enb)
+{
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       struct otx2_mbox *mbox = dev->mbox;
+       struct nix_aq_enq_req *aq;
+
+       /* Pkts will be dropped silently if RQ is disabled */
+       aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+       aq->qidx = rxq->rq;
+       aq->ctype = NIX_AQ_CTYPE_RQ;
+       aq->op = NIX_AQ_INSTOP_WRITE;
+
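+       /* The mbox request is zero-filled on allocation, so flipping the
+        * mask bits below selects only the ena field for this WRITE op.
+        */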
+       aq->rq.ena = enb;
+       aq->rq_mask.ena = ~(aq->rq_mask.ena);
+
+       return otx2_mbox_process(mbox);
+}
+
+static int
+nix_cq_rq_uninit(struct rte_eth_dev *eth_dev, struct otx2_eth_rxq *rxq)
+{
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       struct otx2_mbox *mbox = dev->mbox;
+       struct nix_aq_enq_req *aq;
+       int rc;
+
+       /* RQ is already disabled */
+       /* Disable CQ */
+       aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+       aq->qidx = rxq->rq;
+       aq->ctype = NIX_AQ_CTYPE_CQ;
+       aq->op = NIX_AQ_INSTOP_WRITE;
+
+       aq->cq.ena = 0;
+       aq->cq_mask.ena = ~(aq->cq_mask.ena);
+
+       rc = otx2_mbox_process(mbox);
+       if (rc < 0) {
+               otx2_err("Failed to disable cq context");
+               return rc;
+       }
+
+       return 0;
+}
+
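+/*
+ * The Rx data offset is advanced by NIX_TIMESYNC_RX_OFFSET when PTP is
+ * enabled, to skip over the timestamp that hardware places at the start of
+ * the packet data.
+ */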
+static inline int
+nix_get_data_off(struct otx2_eth_dev *dev)
+{
+       return otx2_ethdev_is_ptp_en(dev) ? NIX_TIMESYNC_RX_OFFSET : 0;
+}
+
+uint64_t
+otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id)
+{
+       struct rte_mbuf mb_def;
+       uint64_t *tmp;
+
+       RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
+       RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
+                               offsetof(struct rte_mbuf, data_off) != 2);
+       RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
+                               offsetof(struct rte_mbuf, data_off) != 4);
+       RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
+                               offsetof(struct rte_mbuf, data_off) != 6);
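+       /* rearm_data is a 64-bit marker aliasing data_off, refcnt, nb_segs
+        * and port; the checks above pin those offsets so the value returned
+        * here (stored as rxq->mbuf_initializer) can rearm an mbuf with a
+        * single 64-bit store in the Rx path.
+        */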
+       mb_def.nb_segs = 1;
+       mb_def.data_off = RTE_PKTMBUF_HEADROOM + nix_get_data_off(dev);
+       mb_def.port = port_id;
+       rte_mbuf_refcnt_set(&mb_def, 1);
+
+       /* Prevent compiler reordering: rearm_data covers previous fields */
+       rte_compiler_barrier();
+       tmp = (uint64_t *)&mb_def.rearm_data;
+
+       return *tmp;
+}
+
+static void
+otx2_nix_rx_queue_release(void *rx_queue)
+{
+       struct otx2_eth_rxq *rxq = rx_queue;
+
+       if (!rxq)
+               return;
+
+       otx2_nix_dbg("Releasing rxq %u", rxq->rq);
+       nix_cq_rq_uninit(rxq->eth_dev, rxq);
+       rte_free(rx_queue);
+}
+
+static int
+otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
+                       uint16_t nb_desc, unsigned int socket,
+                       const struct rte_eth_rxconf *rx_conf,
+                       struct rte_mempool *mp)
+{
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       struct rte_mempool_ops *ops;
+       struct otx2_eth_rxq *rxq;
+       const char *platform_ops;
+       enum nix_q_size_e qsize;
+       uint64_t offloads;
+       int rc;
+
+       rc = -EINVAL;
+
+       /* Compile-time check: fast path elements must fit in a cache line */
+       RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_rxq, slow_path_start) >= 128);
+
+       /* Sanity checks */
+       if (rx_conf->rx_deferred_start == 1) {
+               otx2_err("Deferred Rx start is not supported");
+               goto fail;
+       }
+
+       platform_ops = rte_mbuf_platform_mempool_ops();
+       /* This driver needs octeontx2_npa mempool ops to work */
+       ops = rte_mempool_get_ops(mp->ops_index);
+       if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
+               otx2_err("mempool ops should be of octeontx2_npa type");
+               goto fail;
+       }
+
+       if (mp->pool_id == 0) {
+               otx2_err("Invalid pool_id");
+               goto fail;
+       }
+
+       /* Free memory prior to re-allocation if needed */
+       if (eth_dev->data->rx_queues[rq] != NULL) {
+               otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
+               otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);
+               eth_dev->data->rx_queues[rq] = NULL;
+       }
+
+       offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
+       dev->rx_offloads |= offloads;
+
+       /* Find the CQ size */
+       qsize = nix_qsize_clampup_get(dev, nb_desc);
+       /* Allocate rxq memory */
+       rxq = rte_zmalloc_socket("otx2 rxq", sizeof(*rxq), OTX2_ALIGN, socket);
+       if (rxq == NULL) {
+               otx2_err("Failed to allocate rq=%d", rq);
+               rc = -ENOMEM;
+               goto fail;
+       }
+
+       rxq->eth_dev = eth_dev;
+       rxq->rq = rq;
+       rxq->cq_door = dev->base + NIX_LF_CQ_OP_DOOR;
+       rxq->cq_status = (int64_t *)(dev->base + NIX_LF_CQ_OP_STATUS);
+       rxq->wdata = (uint64_t)rq << 32;
+       rxq->aura = npa_lf_aura_handle_to_aura(mp->pool_id);
+       rxq->mbuf_initializer = otx2_nix_rxq_mbuf_setup(dev,
+                                                       eth_dev->data->port_id);
+       rxq->offloads = offloads;
+       rxq->pool = mp;
+       rxq->qlen = nix_qsize_to_val(qsize);
+       rxq->qsize = qsize;
+       rxq->lookup_mem = otx2_nix_fastpath_lookup_mem_get();
+       rxq->tstamp = &dev->tstamp;
+
+       /* Allocate and initialize the CQ and RQ */
+       rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);
+       if (rc) {
+               otx2_err("Failed to allocate rxq=%u", rq);
+               goto free_rxq;
+       }
+
+       rxq->qconf.socket_id = socket;
+       rxq->qconf.nb_desc = nb_desc;
+       rxq->qconf.mempool = mp;
+       memcpy(&rxq->qconf.conf.rx, rx_conf, sizeof(struct rte_eth_rxconf));
+
+       nix_rx_queue_reset(rxq);
+       otx2_nix_dbg("rq=%d pool=%s qsize=%d nb_desc=%d->%d",
+                    rq, mp->name, qsize, nb_desc, rxq->qlen);
+
+       eth_dev->data->rx_queues[rq] = rxq;
+       eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;
+       return 0;
+
+free_rxq:
+       otx2_nix_rx_queue_release(rxq);
+fail:
+       return rc;
+}
+
+static inline uint8_t
+nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
+{
+       /*
+        * A maximum of three segments can be supported with W8; choose
+        * NIX_MAXSQESZ_W16 for multi-segment offload.
+        */
+       if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+               return NIX_MAXSQESZ_W16;
+       else
+               return NIX_MAXSQESZ_W8;
+}
+
+static int
+nix_sq_init(struct otx2_eth_txq *txq)
+{
+       struct otx2_eth_dev *dev = txq->dev;
+       struct otx2_mbox *mbox = dev->mbox;
+       struct nix_aq_enq_req *sq;
+       uint32_t rr_quantum;
+       uint16_t smq;
+       int rc;
+
+       if (txq->sqb_pool->pool_id == 0)
+               return -EINVAL;
+
+       rc = otx2_nix_tm_get_leaf_data(dev, txq->sq, &rr_quantum, &smq);
+       if (rc) {
+               otx2_err("Failed to get sq->smq(leaf node), rc=%d", rc);
+               return rc;
+       }
+
+       sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+       sq->qidx = txq->sq;
+       sq->ctype = NIX_AQ_CTYPE_SQ;
+       sq->op = NIX_AQ_INSTOP_INIT;
+       sq->sq.max_sqe_size = nix_sq_max_sqe_sz(txq);
+
+       sq->sq.smq = smq;
+       sq->sq.smq_rr_quantum = rr_quantum;
+       sq->sq.default_chan = dev->tx_chan_base;
+       sq->sq.sqe_stype = NIX_STYPE_STF;
+       sq->sq.ena = 1;
+       if (sq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
+               sq->sq.sqe_stype = NIX_STYPE_STP;
+       sq->sq.sqb_aura =
+               npa_lf_aura_handle_to_aura(txq->sqb_pool->pool_id);
+       sq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
+       sq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
+       sq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
+       sq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
+
+       /* Many-to-one reduction */
+       sq->sq.qint_idx = txq->sq % dev->qints;
+
+       return otx2_mbox_process(mbox);
+}
+
+static int
+nix_sq_uninit(struct otx2_eth_txq *txq)
+{
+       struct otx2_eth_dev *dev = txq->dev;
+       struct otx2_mbox *mbox = dev->mbox;
+       struct ndc_sync_op *ndc_req;
+       struct nix_aq_enq_rsp *rsp;
+       struct nix_aq_enq_req *aq;
+       uint16_t sqes_per_sqb;
+       void *sqb_buf;
+       int rc, count;
+
+       otx2_nix_dbg("Cleaning up sq %u", txq->sq);
+
+       aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+       aq->qidx = txq->sq;
+       aq->ctype = NIX_AQ_CTYPE_SQ;
+       aq->op = NIX_AQ_INSTOP_READ;
+
+       rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+       if (rc)
+               return rc;
+
+       /* Check if sq is already cleaned up */
+       if (!rsp->sq.ena)
+               return 0;
+
+       /* Disable sq */
+       aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+       aq->qidx = txq->sq;
+       aq->ctype = NIX_AQ_CTYPE_SQ;
+       aq->op = NIX_AQ_INSTOP_WRITE;
+
+       aq->sq_mask.ena = ~aq->sq_mask.ena;
+       aq->sq.ena = 0;
+
+       rc = otx2_mbox_process(mbox);
+       if (rc)
+               return rc;
+
+       /* Read SQ and free SQBs */
+       aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+       aq->qidx = txq->sq;
+       aq->ctype = NIX_AQ_CTYPE_SQ;
+       aq->op = NIX_AQ_INSTOP_READ;
+
+       rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+       if (rc)
+               return rc;
+
+       if (rsp->sq.smq_pend)
+               otx2_err("SQ has pending SQEs");
+
+       count = rsp->sq.sqb_count;
+       sqes_per_sqb = 1 << txq->sqes_per_sqb_log2;
+       /* Free SQBs that are in use */
+       sqb_buf = (void *)rsp->sq.head_sqb;
+       while (count) {
+               void *next_sqb;
+
+               next_sqb = *(void **)((uintptr_t)sqb_buf + ((sqes_per_sqb - 1) *
+                                     nix_sq_max_sqe_sz(txq)));
+               npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
+                                   (uint64_t)sqb_buf);
+               sqb_buf = next_sqb;
+               count--;
+       }
+
+       /* Free the next-to-use SQB */
+       if (rsp->sq.next_sqb)
+               npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
+                                   rsp->sq.next_sqb);
+
+       /* Sync NDC-NIX-TX for LF */
+       ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
+       ndc_req->nix_lf_tx_sync = 1;
+       rc = otx2_mbox_process(mbox);
+       if (rc)
+               otx2_err("Error on NDC-NIX-TX LF sync, rc %d", rc);
+
+       return rc;
+}
+
+static int
+nix_sqb_aura_limit_cfg(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
+{
+       struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
+       struct npa_aq_enq_req *aura_req;
+
+       aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+       aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+       aura_req->ctype = NPA_AQ_CTYPE_AURA;
+       aura_req->op = NPA_AQ_INSTOP_WRITE;
+
+       aura_req->aura.limit = nb_sqb_bufs;
+       aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);
+
+       return otx2_mbox_process(npa_lf->mbox);
+}
+
+static int
+nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
+{
+       struct otx2_eth_dev *dev = txq->dev;
+       uint16_t sqes_per_sqb, nb_sqb_bufs;
+       char name[RTE_MEMPOOL_NAMESIZE];
+       struct rte_mempool_objsz sz;
+       struct npa_aura_s *aura;
+       uint32_t tmp, blk_sz;
+
+       aura = (struct npa_aura_s *)((uintptr_t)txq->fc_mem + OTX2_ALIGN);
+       snprintf(name, sizeof(name), "otx2_sqb_pool_%d_%d", port, txq->sq);
+       blk_sz = dev->sqb_size;
+
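+       /*
+        * sqb_size is in bytes; a W16 SQE is 16 dwords (128 B) and a W8 SQE
+        * is 8 dwords (64 B), hence (sqb_size / 8) / 16 or / 8 SQEs per SQB.
+        */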
+       if (nix_sq_max_sqe_sz(txq) == NIX_MAXSQESZ_W16)
+               sqes_per_sqb = (dev->sqb_size / 8) / 16;
+       else
+               sqes_per_sqb = (dev->sqb_size / 8) / 8;
+
+       nb_sqb_bufs = nb_desc / sqes_per_sqb;
+       /* Clamp between NIX_MIN_SQB and the devargs max_sqb_count */
+       nb_sqb_bufs =  RTE_MIN(dev->max_sqb_count, RTE_MAX(NIX_MIN_SQB,
+                             nb_sqb_bufs + NIX_SQB_LIST_SPACE));
+
+       txq->sqb_pool = rte_mempool_create_empty(name, NIX_MAX_SQB, blk_sz,
+                                                0, 0, dev->node,
+                                                MEMPOOL_F_NO_SPREAD);
+       txq->nb_sqb_bufs = nb_sqb_bufs;
+       txq->sqes_per_sqb_log2 = (uint16_t)rte_log2_u32(sqes_per_sqb);
+       txq->nb_sqb_bufs_adj = nb_sqb_bufs -
+               RTE_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb;
+       txq->nb_sqb_bufs_adj =
+               (NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
+
+       if (txq->sqb_pool == NULL) {
+               otx2_err("Failed to allocate sqe mempool");
+               goto fail;
+       }
+
+       memset(aura, 0, sizeof(*aura));
+       aura->fc_ena = 1;
+       aura->fc_addr = txq->fc_iova;
+       aura->fc_hyst_bits = 0; /* Store count on all updates */
+       if (rte_mempool_set_ops_byname(txq->sqb_pool, "octeontx2_npa", aura)) {
+               otx2_err("Failed to set ops for sqe mempool");
+               goto fail;
+       }
+       if (rte_mempool_populate_default(txq->sqb_pool) < 0) {
+               otx2_err("Failed to populate sqe mempool");
+               goto fail;
+       }
+
+       tmp = rte_mempool_calc_obj_size(blk_sz, MEMPOOL_F_NO_SPREAD, &sz);
+       if (dev->sqb_size != sz.elt_size) {
+               otx2_err("sqe pool block size is not expected %d != %d",
+                        dev->sqb_size, tmp);
+               goto fail;
+       }
+
+       nix_sqb_aura_limit_cfg(txq->sqb_pool, txq->nb_sqb_bufs);
+
+       return 0;
+fail:
+       return -ENOMEM;
+}
+
+void
+otx2_nix_form_default_desc(struct otx2_eth_txq *txq)
+{
+       struct nix_send_ext_s *send_hdr_ext;
+       struct nix_send_hdr_s *send_hdr;
+       struct nix_send_mem_s *send_mem;
+       union nix_send_sg_s *sg;
+
+       /* Initialize the fields based on a basic single-segment packet */
+       memset(&txq->cmd, 0, sizeof(txq->cmd));
+
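+       /* txq->cmd[] is an array of 64-bit words.  sizem1 in the send header
+        * counts the command size in 16-byte (2-dword) units minus one, which
+        * is what the "6/2 - 1" style arithmetic below computes.
+        */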
+       if (txq->dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
+               send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
+               /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
+               send_hdr->w0.sizem1 = 2;
+
+               send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
+               send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
+               if (txq->dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+                       /* Default: one seg packet would have:
+                        * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
+                        * => 8/2 - 1 = 3
+                        */
+                       send_hdr->w0.sizem1 = 3;
+                       send_hdr_ext->w0.tstmp = 1;
+
+                       /* send_mem follows the descriptors counted by
+                        * sizem1, i.e. it starts at dword offset
+                        * send_hdr->w0.sizem1 * 2.
+                        */
+                       send_mem = (struct nix_send_mem_s *)(txq->cmd +
+                                               (send_hdr->w0.sizem1 << 1));
+                       send_mem->subdc = NIX_SUBDC_MEM;
+                       send_mem->dsz = 0x0;
+                       send_mem->wmem = 0x1;
+                       send_mem->alg = NIX_SENDMEMALG_SETTSTMP;
+                       send_mem->addr = txq->dev->tstamp.tx_tstamp_iova;
+               }
+               sg = (union nix_send_sg_s *)&txq->cmd[4];
+       } else {
+               send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
+               /* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
+               send_hdr->w0.sizem1 = 1;
+               sg = (union nix_send_sg_s *)&txq->cmd[2];
+       }
+
+       send_hdr->w0.sq = txq->sq;
+       sg->subdc = NIX_SUBDC_SG;
+       sg->segs = 1;
+       sg->ld_type = NIX_SENDLDTYPE_LDD;
+
+       rte_smp_wmb();
+}
+
+static void
+otx2_nix_tx_queue_release(void *_txq)
+{
+       struct otx2_eth_txq *txq = _txq;
+       struct rte_eth_dev *eth_dev;
+
+       if (!txq)
+               return;
+
+       eth_dev = txq->dev->eth_dev;
+
+       otx2_nix_dbg("Releasing txq %u", txq->sq);
+
+       /* Flush and disable tm */
+       otx2_nix_tm_sw_xoff(txq, eth_dev->data->dev_started);
+
+       /* Free SQBs and disable the SQ */
+       nix_sq_uninit(txq);
+
+       if (txq->sqb_pool) {
+               rte_mempool_free(txq->sqb_pool);
+               txq->sqb_pool = NULL;
+       }
+       rte_free(txq);
+}
+
+static int
+otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
+                       uint16_t nb_desc, unsigned int socket_id,
+                       const struct rte_eth_txconf *tx_conf)
+{
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       const struct rte_memzone *fc;
+       struct otx2_eth_txq *txq;
+       uint64_t offloads;
+       int rc;
+
+       rc = -EINVAL;
+
+       /* Compile-time check: fast path elements must fit in a cache line */
+       RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_txq, slow_path_start) >= 128);
+
+       if (tx_conf->tx_deferred_start) {
+               otx2_err("Tx deferred start is not supported");
+               goto fail;
+       }
+
+       /* Free memory prior to re-allocation if needed. */
+       if (eth_dev->data->tx_queues[sq] != NULL) {
+               otx2_nix_dbg("Freeing memory prior to re-allocation %d", sq);
+               otx2_nix_tx_queue_release(eth_dev->data->tx_queues[sq]);
+               eth_dev->data->tx_queues[sq] = NULL;
+       }
+
+       /* Find the expected offloads for this queue */
+       offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
+
+       /* Allocate the Tx queue data structure */
+       txq = rte_zmalloc_socket("otx2_ethdev TX queue", sizeof(*txq),
+                                OTX2_ALIGN, socket_id);
+       if (txq == NULL) {
+               otx2_err("Failed to alloc txq=%d", sq);
+               rc = -ENOMEM;
+               goto fail;
+       }
+       txq->sq = sq;
+       txq->dev = dev;
+       txq->sqb_pool = NULL;
+       txq->offloads = offloads;
+       dev->tx_offloads |= offloads;
+
+       /*
+        * Allocate memory for flow control updates from HW.
+        * Allocate one cache line so that it fits all FC_STYPE modes.
+        */
+       fc = rte_eth_dma_zone_reserve(eth_dev, "fcmem", sq,
+                                     OTX2_ALIGN + sizeof(struct npa_aura_s),
+                                     OTX2_ALIGN, dev->node);
+       if (fc == NULL) {
+               otx2_err("Failed to allocate mem for fcmem");
+               rc = -ENOMEM;
+               goto free_txq;
+       }
+       txq->fc_iova = fc->iova;
+       txq->fc_mem = fc->addr;
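+       /* The zone holds the flow control word at offset 0 and, at
+        * OTX2_ALIGN, the npa_aura_s template consumed by
+        * nix_alloc_sqb_pool().
+        */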
+
+       /* Initialize the aura sqb pool */
+       rc = nix_alloc_sqb_pool(eth_dev->data->port_id, txq, nb_desc);
+       if (rc) {
+               otx2_err("Failed to alloc sqe pool rc=%d", rc);
+               goto free_txq;
+       }
+
+       /* Initialize the SQ */
+       rc = nix_sq_init(txq);
+       if (rc) {
+               otx2_err("Failed to init sq=%d context", sq);
+               goto free_txq;
+       }
+
+       txq->fc_cache_pkts = 0;
+       txq->io_addr = dev->base + NIX_LF_OP_SENDX(0);
+       /* Evenly distribute LMT slots across SQs */
+       txq->lmt_addr = (void *)(dev->lmt_addr + ((sq & LMT_SLOT_MASK) << 12));
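+       /* LMT slots are spaced 4 KB apart (the << 12); the slot index is
+        * taken from the low bits of the SQ number.
+        */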
+
+       txq->qconf.socket_id = socket_id;
+       txq->qconf.nb_desc = nb_desc;
+       memcpy(&txq->qconf.conf.tx, tx_conf, sizeof(struct rte_eth_txconf));
+
+       otx2_nix_form_default_desc(txq);
+
+       otx2_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " sqb=0x%" PRIx64 ""
+                    " lmt_addr=%p nb_sqb_bufs=%d sqes_per_sqb_log2=%d", sq,
+                    fc->addr, offloads, txq->sqb_pool->pool_id, txq->lmt_addr,
+                    txq->nb_sqb_bufs, txq->sqes_per_sqb_log2);
+       eth_dev->data->tx_queues[sq] = txq;
+       eth_dev->data->tx_queue_state[sq] = RTE_ETH_QUEUE_STATE_STOPPED;
+       return 0;
+
+free_txq:
+       otx2_nix_tx_queue_release(txq);
+fail:
+       return rc;
+}
+
+static int
+nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
+{
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       struct otx2_eth_qconf *tx_qconf = NULL;
+       struct otx2_eth_qconf *rx_qconf = NULL;
+       struct otx2_eth_txq **txq;
+       struct otx2_eth_rxq **rxq;
+       int i, nb_rxq, nb_txq;
+
+       nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
+       nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
+
+       tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
+       if (tx_qconf == NULL) {
+               otx2_err("Failed to allocate memory for tx_qconf");
+               goto fail;
+       }
+
+       rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
+       if (rx_qconf == NULL) {
+               otx2_err("Failed to allocate memory for rx_qconf");
+               goto fail;
+       }
+
+       txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
+       for (i = 0; i < nb_txq; i++) {
+               if (txq[i] == NULL) {
+                       otx2_err("txq[%d] is already released", i);
+                       goto fail;
+               }
+               memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));
+               otx2_nix_tx_queue_release(txq[i]);
+               eth_dev->data->tx_queues[i] = NULL;
+       }
+
+       rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
+       for (i = 0; i < nb_rxq; i++) {
+               if (rxq[i] == NULL) {
+                       otx2_err("rxq[%d] is already released", i);
+                       goto fail;
+               }
+               memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));
+               otx2_nix_rx_queue_release(rxq[i]);
+               eth_dev->data->rx_queues[i] = NULL;
+       }
+
+       dev->tx_qconf = tx_qconf;
+       dev->rx_qconf = rx_qconf;
+       return 0;
+
+fail:
+       if (tx_qconf)
+               free(tx_qconf);
+       if (rx_qconf)
+               free(rx_qconf);
+
+       return -ENOMEM;
+}
+
+static int
+nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
+{
+       struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       struct otx2_eth_qconf *tx_qconf = dev->tx_qconf;
+       struct otx2_eth_qconf *rx_qconf = dev->rx_qconf;
+       struct otx2_eth_txq **txq;
+       struct otx2_eth_rxq **rxq;
+       int rc, i, nb_rxq, nb_txq;
+
+       nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
+       nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
+
+       rc = -ENOMEM;
+       /* Set up Tx & Rx queues with the previous configuration so that
+        * the queues remain functional when ports are started without
+        * reconfiguring the queues.
+        *
+        * The usual reconfiguration sequence looks like:
+        * port_configure() {
+        *      if(reconfigure) {
+        *              queue_release()
+        *              queue_setup()
+        *      }
+        *      queue_configure() {
+        *              queue_release()
+        *              queue_setup()
+        *      }
+        * }
+        * port_start()
+        *
+        * In some applications' control path, queue_configure() is
+        * NOT invoked for TXQs/RXQs in port_configure().
+        * In such cases, the queues can still be functional after start,
+        * since they were already set up in port_configure().
+        */
+       for (i = 0; i < nb_txq; i++) {
+               rc = otx2_nix_tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc,
+                                            tx_qconf[i].socket_id,
+                                            &tx_qconf[i].conf.tx);
+               if (rc) {
+                       otx2_err("Failed to setup tx queue rc=%d", rc);
+                       txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
+                       for (i -= 1; i >= 0; i--)
+                               otx2_nix_tx_queue_release(txq[i]);
+                       goto fail;
+               }
+       }
+
+       free(tx_qconf); tx_qconf = NULL;
+
+       for (i = 0; i < nb_rxq; i++) {
+               rc = otx2_nix_rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc,
+                                            rx_qconf[i].socket_id,
+                                            &rx_qconf[i].conf.rx,
+                                            rx_qconf[i].mempool);
+               if (rc) {
+                       otx2_err("Failed to setup rx queue rc=%d", rc);
+                       rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
+                       for (i -= 1; i >= 0; i--)
+                               otx2_nix_rx_queue_release(rxq[i]);
+                       goto release_tx_queues;
+               }
+       }
+
+       free(rx_qconf); rx_qconf = NULL;
+
+       return 0;
+
+release_tx_queues:
+       txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
+       for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+               otx2_nix_tx_queue_release(txq[i]);
+fail:
+       if (tx_qconf)
+               free(tx_qconf);
+       if (rx_qconf)
+               free(rx_qconf);
+
+       return rc;
+}
+
+static uint16_t
+nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
+{
+       RTE_SET_USED(queue);
+       RTE_SET_USED(mbufs);
+       RTE_SET_USED(pkts);
+
+       return 0;
+}
+
+static void
+nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
+{
+       /* These dummy functions are required to support applications
+        * which reconfigure queues without stopping the Tx and Rx burst
+        * threads (e.g. the KNI app). When the queue context is saved,
+        * the txqs/rxqs are released, which would crash the app since
+        * Rx/Tx burst may still be running on other lcores.
+        */
+       eth_dev->tx_pkt_burst = nix_eth_nop_burst;
+       eth_dev->rx_pkt_burst = nix_eth_nop_burst;
+       rte_mb();
+}
+
 static int
 otx2_nix_configure(struct rte_eth_dev *eth_dev)
 {
@@ -168,7 +1100,13 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
 
        /* Free the resources allocated from the previous configure */
        if (dev->configured == 1) {
+               otx2_nix_rxchan_bpid_cfg(eth_dev, false);
                oxt2_nix_unregister_queue_irqs(eth_dev);
+               nix_set_nop_rxtx_function(eth_dev);
+               rc = nix_store_queue_cfg_and_then_release(eth_dev);
+               if (rc)
+                       goto fail;
+               otx2_nix_tm_fini(eth_dev);
                nix_lf_free(dev);
        }
 
@@ -195,6 +1133,20 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
                goto fail;
        }
 
+       /* Configure RSS */
+       rc = otx2_nix_rss_config(eth_dev);
+       if (rc) {
+               otx2_err("Failed to configure rss rc=%d", rc);
+               goto free_nix_lf;
+       }
+
+       /* Init the default TM scheduler hierarchy */
+       rc = otx2_nix_tm_init_default(eth_dev);
+       if (rc) {
+               otx2_err("Failed to init traffic manager rc=%d", rc);
+               goto free_nix_lf;
+       }
+
        /* Register queue IRQs */
        rc = oxt2_nix_register_queue_irqs(eth_dev);
        if (rc) {
@@ -202,6 +1154,32 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
                goto free_nix_lf;
        }
 
+       rc = otx2_nix_rxchan_bpid_cfg(eth_dev, true);
+       if (rc) {
+               otx2_err("Failed to configure nix rx chan bpid cfg rc=%d", rc);
+               goto free_nix_lf;
+       }
+
+       /* Enable PTP if it was requested by the app or if it is already
+        * enabled in the PF owning this VF.
+        */
+       memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
+       if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+           otx2_ethdev_is_ptp_en(dev))
+               otx2_nix_timesync_enable(eth_dev);
+       else
+               otx2_nix_timesync_disable(eth_dev);
+
+       /*
+        * Restore the queue configuration on reconfigure, for the case
+        * where the application does not invoke queue setup again.
+        */
+       if (dev->configured == 1) {
+               rc = nix_restore_queue_cfg(eth_dev);
+               if (rc)
+                       goto free_nix_lf;
+       }
+
        /* Update the mac address */
        ea = eth_dev->data->mac_addrs;
        memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
@@ -229,24 +1207,149 @@ fail:
        return rc;
 }
 
+int
+otx2_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
+{
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct otx2_eth_txq *txq;
+       int rc = -EINVAL;
+
+       txq = eth_dev->data->tx_queues[qidx];
+
+       if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
+               return 0;
+
+       rc = otx2_nix_sq_sqb_aura_fc(txq, true);
+       if (rc) {
+               otx2_err("Failed to enable sqb aura fc, txq=%u, rc=%d",
+                        qidx, rc);
+               goto done;
+       }
+
+       data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+
+done:
+       return rc;
+}
+
+int
+otx2_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
+{
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct otx2_eth_txq *txq;
+       int rc;
+
+       txq = eth_dev->data->tx_queues[qidx];
+
+       if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
+               return 0;
+
+       txq->fc_cache_pkts = 0;
+
+       rc = otx2_nix_sq_sqb_aura_fc(txq, false);
+       if (rc) {
+               otx2_err("Failed to disable sqb aura fc, txq=%u, rc=%d",
+                        qidx, rc);
+               goto done;
+       }
+
+       data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+done:
+       return rc;
+}
+
+static int
+otx2_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
+{
+       struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
+       struct rte_eth_dev_data *data = eth_dev->data;
+       int rc;
+
+       if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
+               return 0;
+
+       rc = nix_rq_enb_dis(rxq->eth_dev, rxq, true);
+       if (rc) {
+               otx2_err("Failed to enable rxq=%u, rc=%d", qidx, rc);
+               goto done;
+       }
+
+       data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+
+done:
+       return rc;
+}
+
+static int
+otx2_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
+{
+       struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
+       struct rte_eth_dev_data *data = eth_dev->data;
+       int rc;
+
+       if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
+               return 0;
+
+       rc = nix_rq_enb_dis(rxq->eth_dev, rxq, false);
+       if (rc) {
+               otx2_err("Failed to disable rxq=%u, rc=%d", qidx, rc);
+               goto done;
+       }
+
+       data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+done:
+       return rc;
+}
+
 /* Initialize and register driver with DPDK Application */
 static const struct eth_dev_ops otx2_eth_dev_ops = {
        .dev_infos_get            = otx2_nix_info_get,
        .dev_configure            = otx2_nix_configure,
        .link_update              = otx2_nix_link_update,
+       .tx_queue_setup           = otx2_nix_tx_queue_setup,
+       .tx_queue_release         = otx2_nix_tx_queue_release,
+       .rx_queue_setup           = otx2_nix_rx_queue_setup,
+       .rx_queue_release         = otx2_nix_rx_queue_release,
+       .tx_queue_start           = otx2_nix_tx_queue_start,
+       .tx_queue_stop            = otx2_nix_tx_queue_stop,
+       .rx_queue_start           = otx2_nix_rx_queue_start,
+       .rx_queue_stop            = otx2_nix_rx_queue_stop,
+       .dev_supported_ptypes_get = otx2_nix_supported_ptypes_get,
        .stats_get                = otx2_nix_dev_stats_get,
        .stats_reset              = otx2_nix_dev_stats_reset,
        .get_reg                  = otx2_nix_dev_get_reg,
+       .mac_addr_add             = otx2_nix_mac_addr_add,
+       .mac_addr_remove          = otx2_nix_mac_addr_del,
+       .mac_addr_set             = otx2_nix_mac_addr_set,
        .promiscuous_enable       = otx2_nix_promisc_enable,
        .promiscuous_disable      = otx2_nix_promisc_disable,
        .allmulticast_enable      = otx2_nix_allmulticast_enable,
        .allmulticast_disable     = otx2_nix_allmulticast_disable,
        .queue_stats_mapping_set  = otx2_nix_queue_stats_mapping,
+       .reta_update              = otx2_nix_dev_reta_update,
+       .reta_query               = otx2_nix_dev_reta_query,
+       .rss_hash_update          = otx2_nix_rss_hash_update,
+       .rss_hash_conf_get        = otx2_nix_rss_hash_conf_get,
        .xstats_get               = otx2_nix_xstats_get,
        .xstats_get_names         = otx2_nix_xstats_get_names,
        .xstats_reset             = otx2_nix_xstats_reset,
        .xstats_get_by_id         = otx2_nix_xstats_get_by_id,
        .xstats_get_names_by_id   = otx2_nix_xstats_get_names_by_id,
+       .rxq_info_get             = otx2_nix_rxq_info_get,
+       .txq_info_get             = otx2_nix_txq_info_get,
+       .rx_queue_count           = otx2_nix_rx_queue_count,
+       .rx_descriptor_done       = otx2_nix_rx_descriptor_done,
+       .rx_descriptor_status     = otx2_nix_rx_descriptor_status,
+       .tx_done_cleanup          = otx2_nix_tx_done_cleanup,
+       .pool_ops_supported       = otx2_nix_pool_ops_supported,
+       .get_module_info          = otx2_nix_get_module_info,
+       .get_module_eeprom        = otx2_nix_get_module_eeprom,
+       .flow_ctrl_get            = otx2_nix_flow_ctrl_get,
+       .flow_ctrl_set            = otx2_nix_flow_ctrl_set,
+       .timesync_enable          = otx2_nix_timesync_enable,
+       .timesync_disable         = otx2_nix_timesync_disable,
 };
 
 static inline int
@@ -404,6 +1507,9 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
        /* Also sync same MAC address to CGX table */
        otx2_cgx_mac_addr_set(eth_dev, &eth_dev->data->mac_addrs[0]);
 
+       /* Initialize the tm data structures */
+       otx2_nix_tm_conf_init(eth_dev);
+
        dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
        dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
 
@@ -439,12 +1545,38 @@ otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
 {
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct rte_pci_device *pci_dev;
-       int rc;
+       int rc, i;
 
        /* Nothing to be done for secondary processes */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
+       /* Disable nix bpid config */
+       otx2_nix_rxchan_bpid_cfg(eth_dev, false);
+
+       /* Disable PTP if already enabled */
+       if (otx2_ethdev_is_ptp_en(dev))
+               otx2_nix_timesync_disable(eth_dev);
+
+       /* Free up SQs */
+       for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+               otx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]);
+               eth_dev->data->tx_queues[i] = NULL;
+       }
+       eth_dev->data->nb_tx_queues = 0;
+
+       /* Free up RQs and CQs */
+       for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+               otx2_nix_rx_queue_release(eth_dev->data->rx_queues[i]);
+               eth_dev->data->rx_queues[i] = NULL;
+       }
+       eth_dev->data->nb_rx_queues = 0;
+
+       /* Free tm resources */
+       rc = otx2_nix_tm_fini(eth_dev);
+       if (rc)
+               otx2_err("Failed to cleanup tm, rc=%d", rc);
+
        /* Unregister queue irqs */
        oxt2_nix_unregister_queue_irqs(eth_dev);