diff --git a/drivers/net/octeontx2/otx2_tm.c b/drivers/net/octeontx2/otx2_tm.c
index 91f31df..12903ec 100644
--- a/drivers/net/octeontx2/otx2_tm.c
+++ b/drivers/net/octeontx2/otx2_tm.c
@@ -20,14 +20,67 @@ enum otx2_tm_node_level {
        OTX2_TM_LVL_MAX,
 };
 
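+/*
+ * Pack the computed shaper parameters into the NIX_AF_*_CIR/PIR
+ * register layout: burst exponent/mantissa in the upper bits and the
+ * rate div_exp/exponent/mantissa in the lower bits. Callers OR in
+ * bit 0 to enable the shaper.
+ */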
+static inline uint64_t
+shaper2regval(struct shaper_params *shaper)
+{
+       return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
+               (shaper->div_exp << 13) | (shaper->exponent << 9) |
+               (shaper->mantissa << 1);
+}
+
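+/*
+ * Derive the NIX TX link index from the base TX channel: channels at
+ * 0x800 and above belong to a CGX LMAC, channels below 0x700 use the
+ * LBK link (12), and anything in between keeps the SDP default (13).
+ */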
+static int
+nix_get_link(struct otx2_eth_dev *dev)
+{
+       int link = 13 /* SDP */;
+       uint16_t lmac_chan;
+       uint16_t map;
+
+       lmac_chan = dev->tx_chan_base;
+
+       /* CGX lmac link */
+       if (lmac_chan >= 0x800) {
+               map = lmac_chan & 0x7FF;
+               link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
+       } else if (lmac_chan < 0x700) {
+               /* LBK channel */
+               link = 12;
+       }
+
+       return link;
+}
+
+static uint8_t
+nix_get_relchan(struct otx2_eth_dev *dev)
+{
+       return dev->tx_chan_base & 0xff;
+}
+
 static bool
 nix_tm_have_tl1_access(struct otx2_eth_dev *dev)
 {
        bool is_lbk = otx2_dev_is_lbk(dev);
-       return otx2_dev_is_pf(dev) && !otx2_dev_is_A0(dev) &&
+       return otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) &&
                !is_lbk && !dev->maxvf;
 }
 
+static int
+find_prio_anchor(struct otx2_eth_dev *dev, uint32_t node_id)
+{
+       struct otx2_nix_tm_node *child_node;
+
+       TAILQ_FOREACH(child_node, &dev->node_list, node) {
+               if (!child_node->parent)
+                       continue;
+               if (child_node->parent->id != node_id)
+                       continue;
+               if (child_node->priority == child_node->parent->rr_prio)
+                       continue;
+               return child_node->hw_id - child_node->priority;
+       }
+       return 0;
+}
+
 static struct otx2_nix_tm_shaper_profile *
 nix_tm_shaper_profile_search(struct otx2_eth_dev *dev, uint32_t shaper_id)
 {
@@ -40,6 +93,451 @@ nix_tm_shaper_profile_search(struct otx2_eth_dev *dev, uint32_t shaper_id)
        return NULL;
 }
 
+static inline uint64_t
+shaper_rate_to_nix(uint64_t cclk_hz, uint64_t cclk_ticks,
+                  uint64_t value, uint64_t *exponent_p,
+                  uint64_t *mantissa_p, uint64_t *div_exp_p)
+{
+       uint64_t div_exp, exponent, mantissa;
+
+       /* Boundary checks */
+       if (value < MIN_SHAPER_RATE(cclk_hz, cclk_ticks) ||
+           value > MAX_SHAPER_RATE(cclk_hz, cclk_ticks))
+               return 0;
+
+       if (value <= SHAPER_RATE(cclk_hz, cclk_ticks, 0, 0, 0)) {
+               /* Calculate rate div_exp and mantissa using
+                * the following formula:
+                *
+                * value = (cclk_hz * (256 + mantissa)) /
+                *              ((cclk_ticks << div_exp) * 256)
+                */
+               div_exp = 0;
+               exponent = 0;
+               mantissa = MAX_RATE_MANTISSA;
+
+               while (value < (cclk_hz / (cclk_ticks << div_exp)))
+                       div_exp += 1;
+
+               while (value <
+                      ((cclk_hz * (256 + mantissa)) /
+                       ((cclk_ticks << div_exp) * 256)))
+                       mantissa -= 1;
+       } else {
+               /* Calculate rate exponent and mantissa using
+                * the following formula:
+                *
+                * value = (cclk_hz * ((256 + mantissa) << exponent)) /
+                *              (cclk_ticks * 256)
+                *
+                */
+               div_exp = 0;
+               exponent = MAX_RATE_EXPONENT;
+               mantissa = MAX_RATE_MANTISSA;
+
+               while (value < (cclk_hz * (1 << exponent)) / cclk_ticks)
+                       exponent -= 1;
+
+               while (value < (cclk_hz * ((256 + mantissa) << exponent)) /
+                      (cclk_ticks * 256))
+                       mantissa -= 1;
+       }
+
+       if (div_exp > MAX_RATE_DIV_EXP ||
+           exponent > MAX_RATE_EXPONENT || mantissa > MAX_RATE_MANTISSA)
+               return 0;
+
+       if (div_exp_p)
+               *div_exp_p = div_exp;
+       if (exponent_p)
+               *exponent_p = exponent;
+       if (mantissa_p)
+               *mantissa_p = mantissa;
+
+       /* Calculate real rate value */
+       return SHAPER_RATE(cclk_hz, cclk_ticks, exponent, mantissa, div_exp);
+}
+
+static inline uint64_t
+lx_shaper_rate_to_nix(uint64_t cclk_hz, uint32_t hw_lvl,
+                     uint64_t value, uint64_t *exponent,
+                     uint64_t *mantissa, uint64_t *div_exp)
+{
+       if (hw_lvl == NIX_TXSCH_LVL_TL1)
+               return shaper_rate_to_nix(cclk_hz, L1_TIME_WHEEL_CCLK_TICKS,
+                                         value, exponent, mantissa, div_exp);
+       else
+               return shaper_rate_to_nix(cclk_hz, LX_TIME_WHEEL_CCLK_TICKS,
+                                         value, exponent, mantissa, div_exp);
+}
+
+static inline uint64_t
+shaper_burst_to_nix(uint64_t value, uint64_t *exponent_p,
+                   uint64_t *mantissa_p)
+{
+       uint64_t exponent, mantissa;
+
+       if (value < MIN_SHAPER_BURST || value > MAX_SHAPER_BURST)
+               return 0;
+
+       /* Calculate burst exponent and mantissa using
+        * the following formula:
+        *
+        * value = ((256 + mantissa) << (exponent + 1)) / 256
+        *
+        */
+       exponent = MAX_BURST_EXPONENT;
+       mantissa = MAX_BURST_MANTISSA;
+
+       while (value < (1ull << (exponent + 1)))
+               exponent -= 1;
+
+       while (value < ((256 + mantissa) << (exponent + 1)) / 256)
+               mantissa -= 1;
+
+       if (exponent > MAX_BURST_EXPONENT || mantissa > MAX_BURST_MANTISSA)
+               return 0;
+
+       if (exponent_p)
+               *exponent_p = exponent;
+       if (mantissa_p)
+               *mantissa_p = mantissa;
+
+       return SHAPER_BURST(exponent, mantissa);
+}
+
+static int
+configure_shaper_cir_pir_reg(struct otx2_eth_dev *dev,
+                            struct otx2_nix_tm_node *tm_node,
+                            struct shaper_params *cir,
+                            struct shaper_params *pir)
+{
+       uint32_t shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
+       struct otx2_nix_tm_shaper_profile *shaper_profile = NULL;
+       struct rte_tm_shaper_params *param;
+
+       shaper_profile_id = tm_node->params.shaper_profile_id;
+
+       shaper_profile = nix_tm_shaper_profile_search(dev, shaper_profile_id);
+       if (shaper_profile) {
+               param = &shaper_profile->profile;
+               /* Calculate CIR exponent and mantissa */
+               if (param->committed.rate)
+                       cir->rate = lx_shaper_rate_to_nix(CCLK_HZ,
+                                                         tm_node->hw_lvl_id,
+                                                         param->committed.rate,
+                                                         &cir->exponent,
+                                                         &cir->mantissa,
+                                                         &cir->div_exp);
+
+               /* Calculate PIR exponent and mantissa */
+               if (param->peak.rate)
+                       pir->rate = lx_shaper_rate_to_nix(CCLK_HZ,
+                                                         tm_node->hw_lvl_id,
+                                                         param->peak.rate,
+                                                         &pir->exponent,
+                                                         &pir->mantissa,
+                                                         &pir->div_exp);
+
+               /* Calculate CIR burst exponent and mantissa */
+               if (param->committed.size)
+                       cir->burst = shaper_burst_to_nix(param->committed.size,
+                                                        &cir->burst_exponent,
+                                                        &cir->burst_mantissa);
+
+               /* Calculate PIR burst exponent and mantissa */
+               if (param->peak.size)
+                       pir->burst = shaper_burst_to_nix(param->peak.size,
+                                                        &pir->burst_exponent,
+                                                        &pir->burst_mantissa);
+       }
+
+       return 0;
+}
+
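+/*
+ * Flush the queued register writes to the AF over the mailbox and
+ * reset num_regs so the caller can refill the same request.
+ */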
+static int
+send_tm_reqval(struct otx2_mbox *mbox, struct nix_txschq_config *req)
+{
+       int rc;
+
+       if (req->num_regs > MAX_REGS_PER_MBOX_MSG)
+               return -ERANGE;
+
+       rc = otx2_mbox_process(mbox);
+       if (rc)
+               return rc;
+
+       req->num_regs = 0;
+       return 0;
+}
+
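+/*
+ * Program the scheduler registers of a single non-leaf node for its
+ * hardware level: parent linkage, topology (prio anchor child and RR
+ * priority), schedule (strict priority and DWRR quantum) and, when a
+ * shaper profile is attached, the PIR/CIR rate limiters.
+ */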
+static int
+populate_tm_registers(struct otx2_eth_dev *dev,
+                     struct otx2_nix_tm_node *tm_node)
+{
+       uint64_t strict_schedul_prio, rr_prio;
+       struct otx2_mbox *mbox = dev->mbox;
+       volatile uint64_t *reg, *regval;
+       uint64_t parent = 0, child = 0;
+       struct shaper_params cir, pir;
+       struct nix_txschq_config *req;
+       uint64_t rr_quantum;
+       uint32_t hw_lvl;
+       uint32_t schq;
+       int rc;
+
+       memset(&cir, 0, sizeof(cir));
+       memset(&pir, 0, sizeof(pir));
+
+       /* Skip leaf nodes */
+       if (tm_node->hw_lvl_id == NIX_TXSCH_LVL_CNT)
+               return 0;
+
+       /* Root node will not have a parent node */
+       if (tm_node->hw_lvl_id == dev->otx2_tm_root_lvl)
+               parent = tm_node->parent_hw_id;
+       else
+               parent = tm_node->parent->hw_id;
+
+       /* When TL2 is the root level, program the parent TL1 with defaults */
+       if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
+           tm_node->hw_lvl_id == dev->otx2_tm_root_lvl) {
+               schq = parent;
+               /*
+                * Default config for TL1.
+                * For VF this is always ignored.
+                */
+
+               req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+               req->lvl = NIX_TXSCH_LVL_TL1;
+
+               /* Set DWRR quantum */
+               req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
+               req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
+               req->num_regs++;
+
+               req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
+               req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+               req->num_regs++;
+
+               req->reg[2] = NIX_AF_TL1X_CIR(schq);
+               req->regval[2] = 0;
+               req->num_regs++;
+
+               rc = send_tm_reqval(mbox, req);
+               if (rc)
+                       goto error;
+       }
+
+       if (tm_node->hw_lvl_id != NIX_TXSCH_LVL_SMQ)
+               child = find_prio_anchor(dev, tm_node->id);
+
+       rr_prio = tm_node->rr_prio;
+       hw_lvl = tm_node->hw_lvl_id;
+       strict_schedul_prio = tm_node->priority;
+       schq = tm_node->hw_id;
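+       /* Scale the rte_tm weight linearly into the HW DWRR quantum range */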
+       rr_quantum = (tm_node->weight * NIX_TM_RR_QUANTUM_MAX) /
+               MAX_SCHED_WEIGHT;
+
+       configure_shaper_cir_pir_reg(dev, tm_node, &cir, &pir);
+
+       otx2_tm_dbg("Configure node %p, lvl %u hw_lvl %u, id %u, hw_id %u, "
+                    "parent_hw_id %" PRIx64 ", pir %" PRIx64 ", cir %" PRIx64,
+                    tm_node, tm_node->level_id, hw_lvl,
+                    tm_node->id, schq, parent, pir.rate, cir.rate);
+
+       rc = -EFAULT;
+
+       switch (hw_lvl) {
+       case NIX_TXSCH_LVL_SMQ:
+               req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+               req->lvl = hw_lvl;
+               reg = req->reg;
+               regval = req->regval;
+               req->num_regs = 0;
+
+               /* Set xoff which will be cleared later */
+               *reg++ = NIX_AF_SMQX_CFG(schq);
+               *regval++ = BIT_ULL(50) | ((uint64_t)NIX_MAX_VTAG_INS << 36) |
+                               (NIX_MAX_HW_FRS << 8) | NIX_MIN_HW_FRS;
+               req->num_regs++;
+               *reg++ = NIX_AF_MDQX_PARENT(schq);
+               *regval++ = parent << 16;
+               req->num_regs++;
+               *reg++ = NIX_AF_MDQX_SCHEDULE(schq);
+               *regval++ = (strict_schedul_prio << 24) | rr_quantum;
+               req->num_regs++;
+               if (pir.rate && pir.burst) {
+                       *reg++ = NIX_AF_MDQX_PIR(schq);
+                       *regval++ = shaper2regval(&pir) | 1;
+                       req->num_regs++;
+               }
+
+               if (cir.rate && cir.burst) {
+                       *reg++ = NIX_AF_MDQX_CIR(schq);
+                       *regval++ = shaper2regval(&cir) | 1;
+                       req->num_regs++;
+               }
+
+               rc = send_tm_reqval(mbox, req);
+               if (rc)
+                       goto error;
+               break;
+       case NIX_TXSCH_LVL_TL4:
+               req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+               req->lvl = hw_lvl;
+               req->num_regs = 0;
+               reg = req->reg;
+               regval = req->regval;
+
+               *reg++ = NIX_AF_TL4X_PARENT(schq);
+               *regval++ = parent << 16;
+               req->num_regs++;
+               *reg++ = NIX_AF_TL4X_TOPOLOGY(schq);
+               *regval++ = (child << 32) | (rr_prio << 1);
+               req->num_regs++;
+               *reg++ = NIX_AF_TL4X_SCHEDULE(schq);
+               *regval++ = (strict_schedul_prio << 24) | rr_quantum;
+               req->num_regs++;
+               if (pir.rate && pir.burst) {
+                       *reg++ = NIX_AF_TL4X_PIR(schq);
+                       *regval++ = shaper2regval(&pir) | 1;
+                       req->num_regs++;
+               }
+               if (cir.rate && cir.burst) {
+                       *reg++ = NIX_AF_TL4X_CIR(schq);
+                       *regval++ = shaper2regval(&cir) | 1;
+                       req->num_regs++;
+               }
+
+               rc = send_tm_reqval(mbox, req);
+               if (rc)
+                       goto error;
+               break;
+       case NIX_TXSCH_LVL_TL3:
+               req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+               req->lvl = hw_lvl;
+               req->num_regs = 0;
+               reg = req->reg;
+               regval = req->regval;
+
+               *reg++ = NIX_AF_TL3X_PARENT(schq);
+               *regval++ = parent << 16;
+               req->num_regs++;
+               *reg++ = NIX_AF_TL3X_TOPOLOGY(schq);
+               *regval++ = (child << 32) | (rr_prio << 1);
+               req->num_regs++;
+               *reg++ = NIX_AF_TL3X_SCHEDULE(schq);
+               *regval++ = (strict_schedul_prio << 24) | rr_quantum;
+               req->num_regs++;
+               if (pir.rate && pir.burst) {
+                       *reg++ = NIX_AF_TL3X_PIR(schq);
+                       *regval++ = shaper2regval(&pir) | 1;
+                       req->num_regs++;
+               }
+               if (cir.rate && cir.burst) {
+                       *reg++ = NIX_AF_TL3X_CIR(schq);
+                       *regval++ = shaper2regval(&cir) | 1;
+                       req->num_regs++;
+               }
+
+               rc = send_tm_reqval(mbox, req);
+               if (rc)
+                       goto error;
+               break;
+       case NIX_TXSCH_LVL_TL2:
+               req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+               req->lvl = hw_lvl;
+               req->num_regs = 0;
+               reg = req->reg;
+               regval = req->regval;
+
+               *reg++ = NIX_AF_TL2X_PARENT(schq);
+               *regval++ = parent << 16;
+               req->num_regs++;
+               *reg++ = NIX_AF_TL2X_TOPOLOGY(schq);
+               *regval++ = (child << 32) | (rr_prio << 1);
+               req->num_regs++;
+               *reg++ = NIX_AF_TL2X_SCHEDULE(schq);
+               if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2)
+                       *regval++ = (1 << 24) | rr_quantum;
+               else
+                       *regval++ = (strict_schedul_prio << 24) | rr_quantum;
+               req->num_regs++;
+               *reg++ = NIX_AF_TL3_TL2X_LINKX_CFG(schq, nix_get_link(dev));
+               *regval++ = BIT_ULL(12) | nix_get_relchan(dev);
+               req->num_regs++;
+               if (pir.rate && pir.burst) {
+                       *reg++ = NIX_AF_TL2X_PIR(schq);
+                       *regval++ = shaper2regval(&pir) | 1;
+                       req->num_regs++;
+               }
+               if (cir.rate && cir.burst) {
+                       *reg++ = NIX_AF_TL2X_CIR(schq);
+                       *regval++ = shaper2regval(&cir) | 1;
+                       req->num_regs++;
+               }
+
+               rc = send_tm_reqval(mbox, req);
+               if (rc)
+                       goto error;
+               break;
+       case NIX_TXSCH_LVL_TL1:
+               req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+               req->lvl = hw_lvl;
+               req->num_regs = 0;
+               reg = req->reg;
+               regval = req->regval;
+
+               *reg++ = NIX_AF_TL1X_SCHEDULE(schq);
+               *regval++ = rr_quantum;
+               req->num_regs++;
+               *reg++ = NIX_AF_TL1X_TOPOLOGY(schq);
+               *regval++ = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
+               req->num_regs++;
+               if (cir.rate && cir.burst) {
+                       *reg++ = NIX_AF_TL1X_CIR(schq);
+                       *regval++ = shaper2regval(&cir) | 1;
+                       req->num_regs++;
+               }
+
+               rc = send_tm_reqval(mbox, req);
+               if (rc)
+                       goto error;
+               break;
+       }
+
+       return 0;
+error:
+       otx2_err("Txschq cfg request failed for node %p, rc=%d", tm_node, rc);
+       return rc;
+}
+
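+/*
+ * Walk the hierarchy from the root level down and program the
+ * scheduler registers of every node. Not permitted on the SDP link
+ * (nix_get_link() == 13).
+ */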
+static int
+nix_tm_txsch_reg_config(struct otx2_eth_dev *dev)
+{
+       struct otx2_nix_tm_node *tm_node;
+       uint32_t lvl;
+       int rc = 0;
+
+       if (nix_get_link(dev) == 13)
+               return -EPERM;
+
+       for (lvl = 0; lvl < (uint32_t)dev->otx2_tm_root_lvl + 1; lvl++) {
+               TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+                       if (tm_node->hw_lvl_id == lvl) {
+                               rc = populate_tm_registers(dev, tm_node);
+                               if (rc)
+                                       goto exit;
+                       }
+               }
+       }
+exit:
+       return rc;
+}
+
 static struct otx2_nix_tm_node *
 nix_tm_node_search(struct otx2_eth_dev *dev,
                   uint32_t node_id, bool user)
@@ -178,6 +676,224 @@ nix_tm_clear_shaper_profiles(struct otx2_eth_dev *dev)
        return 0;
 }
 
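+/*
+ * Assert or deassert SMQ xoff/flush (bits 50 and 49 of
+ * NIX_AF_SMQX_CFG) while rewriting the untouched VTAG and frame-size
+ * fields with their defaults.
+ */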
+static int
+nix_smq_xoff(struct otx2_eth_dev *dev, uint16_t smq, bool enable)
+{
+       struct otx2_mbox *mbox = dev->mbox;
+       struct nix_txschq_config *req;
+
+       req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+       req->lvl = NIX_TXSCH_LVL_SMQ;
+       req->num_regs = 1;
+
+       req->reg[0] = NIX_AF_SMQX_CFG(smq);
+       /* Unmodified fields */
+       req->regval[0] = ((uint64_t)NIX_MAX_VTAG_INS << 36) |
+                               (NIX_MAX_HW_FRS << 8) | NIX_MIN_HW_FRS;
+
+       if (enable)
+               req->regval[0] |= BIT_ULL(50) | BIT_ULL(49);
+
+       return otx2_mbox_process(mbox);
+}
+
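+/*
+ * Toggle flow control on the SQ's SQB aura and re-seed the TX
+ * flow-control memory: the current aura count when enabling, the full
+ * SQB budget when disabling.
+ */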
+int
+otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable)
+{
+       struct otx2_eth_txq *txq = __txq;
+       struct npa_aq_enq_req *req;
+       struct npa_aq_enq_rsp *rsp;
+       struct otx2_npa_lf *lf;
+       struct otx2_mbox *mbox;
+       uint64_t aura_handle;
+       int rc;
+
+       lf = otx2_npa_lf_obj_get();
+       if (!lf)
+               return -EFAULT;
+       mbox = lf->mbox;
+       /* Set/clear sqb aura fc_ena */
+       aura_handle = txq->sqb_pool->pool_id;
+       req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+
+       req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
+       req->ctype = NPA_AQ_CTYPE_AURA;
+       req->op = NPA_AQ_INSTOP_WRITE;
+       /* Below is not needed for aura writes but AF driver needs it */
+       /* AF will translate to associated poolctx */
+       req->aura.pool_addr = req->aura_id;
+
+       req->aura.fc_ena = enable;
+       req->aura_mask.fc_ena = 1;
+
+       rc = otx2_mbox_process(mbox);
+       if (rc)
+               return rc;
+
+       /* Read back npa aura ctx */
+       req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+
+       req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
+       req->ctype = NPA_AQ_CTYPE_AURA;
+       req->op = NPA_AQ_INSTOP_READ;
+
+       rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+       if (rc)
+               return rc;
+
+       /* Seed fc_mem now; when enabling, HW may generate no trigger to update it */
+       if (enable)
+               *(volatile uint64_t *)txq->fc_mem = rsp->aura.count;
+       else
+               *(volatile uint64_t *)txq->fc_mem = txq->nb_sqb_bufs;
+       /* Sync write barrier */
+       rte_wmb();
+
+       return 0;
+}
+
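+/*
+ * Poll NIX_LF_SQ_OP_STATUS until the SQ is quiescent: at most one SQB
+ * in flight, head equal to tail and every SQB returned to the pool.
+ */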
+static void
+nix_txq_flush_sq_spin(struct otx2_eth_txq *txq)
+{
+       uint16_t sqb_cnt, head_off, tail_off;
+       struct otx2_eth_dev *dev = txq->dev;
+       uint16_t sq = txq->sq;
+       uint64_t reg, val;
+       int64_t *regaddr;
+
+       while (true) {
+               reg = ((uint64_t)sq << 32);
+               regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);
+               val = otx2_atomic64_add_nosync(reg, regaddr);
+
+               regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS);
+               val = otx2_atomic64_add_nosync(reg, regaddr);
+               sqb_cnt = val & 0xFFFF;
+               head_off = (val >> 20) & 0x3F;
+               tail_off = (val >> 28) & 0x3F;
+
+               /* SQ reached quiescent state */
+               if (sqb_cnt <= 1 && head_off == tail_off &&
+                   (*txq->fc_mem == txq->nb_sqb_bufs)) {
+                       break;
+               }
+
+               rte_pause();
+       }
+}
+
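+/*
+ * Software xoff of an SQ: read its SMQ from the SQ context, drop SQB
+ * aura flow control, clear SMQ xoff so pending packets drain, spin
+ * until the SQ is quiescent, then flush and re-assert SMQ xoff.
+ * CGX RX/TX is started temporarily when the port is not yet up.
+ */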
+int
+otx2_nix_tm_sw_xoff(void *__txq, bool dev_started)
+{
+       struct otx2_eth_txq *txq = __txq;
+       struct otx2_eth_dev *dev = txq->dev;
+       struct otx2_mbox *mbox = dev->mbox;
+       struct nix_aq_enq_req *req;
+       struct nix_aq_enq_rsp *rsp;
+       uint16_t smq;
+       int rc;
+
+       /* Get smq from sq */
+       req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+       req->qidx = txq->sq;
+       req->ctype = NIX_AQ_CTYPE_SQ;
+       req->op = NIX_AQ_INSTOP_READ;
+       rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+       if (rc) {
+               otx2_err("Failed to get smq, rc=%d", rc);
+               return -EIO;
+       }
+
+       /* Check if sq is enabled */
+       if (!rsp->sq.ena)
+               return 0;
+
+       smq = rsp->sq.smq;
+
+       /* Enable CGX RXTX to drain pkts */
+       if (!dev_started) {
+               rc = otx2_cgx_rxtx_start(dev);
+               if (rc)
+                       return rc;
+       }
+
+       rc = otx2_nix_sq_sqb_aura_fc(txq, false);
+       if (rc < 0) {
+               otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
+               goto cleanup;
+       }
+
+       /* Disable smq xoff for case it was enabled earlier */
+       rc = nix_smq_xoff(dev, smq, false);
+       if (rc) {
+               otx2_err("Failed to enable smq for sq %u, rc=%d", txq->sq, rc);
+               goto cleanup;
+       }
+
+       /* Wait for sq entries to be flushed */
+       nix_txq_flush_sq_spin(txq);
+
+       /* Flush and enable smq xoff */
+       rc = nix_smq_xoff(dev, smq, true);
+       if (rc) {
+               otx2_err("Failed to disable smq for sq %u, rc=%d", txq->sq, rc);
+               return rc;
+       }
+
+cleanup:
+       /* Restore cgx state */
+       if (!dev_started)
+               rc |= otx2_cgx_rxtx_stop(dev);
+
+       return rc;
+}
+
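+/*
+ * Software xon of an SQ: bind it to the given SMQ and DWRR quantum,
+ * re-enable SQB aura flow control and clear SMQ xoff so the queue can
+ * transmit again.
+ */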
+static int
+nix_tm_sw_xon(struct otx2_eth_txq *txq,
+             uint16_t smq, uint32_t rr_quantum)
+{
+       struct otx2_eth_dev *dev = txq->dev;
+       struct otx2_mbox *mbox = dev->mbox;
+       struct nix_aq_enq_req *req;
+       int rc;
+
+       otx2_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum %u",
+                   txq->sq, smq, rr_quantum);
+       /* Set smq from sq */
+       req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+       req->qidx = txq->sq;
+       req->ctype = NIX_AQ_CTYPE_SQ;
+       req->op = NIX_AQ_INSTOP_WRITE;
+       req->sq.smq = smq;
+       req->sq.smq_rr_quantum = rr_quantum;
+       req->sq_mask.smq = ~req->sq_mask.smq;
+       req->sq_mask.smq_rr_quantum = ~req->sq_mask.smq_rr_quantum;
+
+       rc = otx2_mbox_process(mbox);
+       if (rc) {
+               otx2_err("Failed to set smq, rc=%d", rc);
+               return -EIO;
+       }
+
+       /* Enable sqb_aura fc */
+       rc = otx2_nix_sq_sqb_aura_fc(txq, true);
+       if (rc < 0) {
+               otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
+               return rc;
+       }
+
+       /* Disable smq xoff */
+       rc = nix_smq_xoff(dev, smq, false);
+       if (rc) {
+               otx2_err("Failed to enable smq for sq %u, rc=%d", txq->sq, rc);
+               return rc;
+       }
+
+       return 0;
+}
+
 static int
 nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask,
                      uint32_t flags, bool hw_only)
@@ -431,10 +1147,11 @@ static int
 nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)
 {
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+       struct otx2_nix_tm_node *tm_node;
+       uint16_t sq, smq, rr_quantum;
+       struct otx2_eth_txq *txq;
        int rc;
 
-       RTE_SET_USED(xmit_enable);
-
        nix_tm_update_parent_info(dev);
 
        rc = nix_tm_send_txsch_alloc_msg(dev);
@@ -443,7 +1160,49 @@ nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)
                return rc;
        }
 
-       return 0;
+       rc = nix_tm_txsch_reg_config(dev);
+       if (rc) {
+               otx2_err("TM failed to configure sched registers, rc=%d", rc);
+               return rc;
+       }
+
+       /* Enable xmit as all the topology is ready */
+       TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+               if (tm_node->flags & NIX_TM_NODE_ENABLED)
+                       continue;
+
+               /* Non-queue levels need no SQ setup; just mark them enabled */
+               if (tm_node->level_id != OTX2_TM_LVL_QUEUE) {
+                       tm_node->flags |= NIX_TM_NODE_ENABLED;
+                       continue;
+               }
+
+               /* Without xmit_enable, skip SMQ setup and leave the node disabled */
+               if (!xmit_enable)
+                       continue;
+
+               sq = tm_node->id;
+               if (sq >= eth_dev->data->nb_tx_queues) {
+                       rc = -EFAULT;
+                       break;
+               }
+
+               txq = eth_dev->data->tx_queues[sq];
+
+               smq = tm_node->parent->hw_id;
+               rr_quantum = (tm_node->weight *
+                             NIX_TM_RR_QUANTUM_MAX) / MAX_SCHED_WEIGHT;
+
+               rc = nix_tm_sw_xon(txq, smq, rr_quantum);
+               if (rc)
+                       break;
+               tm_node->flags |= NIX_TM_NODE_ENABLED;
+       }
+
+       if (rc)
+               otx2_err("TM failed to enable xmit on sq %u, rc=%d", sq, rc);
+
+       return rc;
 }
 
 static int
@@ -600,3 +1359,38 @@ otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
        dev->tm_flags = 0;
        return 0;
 }
+
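+/*
+ * Return the SMQ and DWRR quantum of the leaf (SQ) node so the queue
+ * can be started; clears SMQ xoff and marks the node enabled.
+ */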
+int
+otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
+                         uint32_t *rr_quantum, uint16_t *smq)
+{
+       struct otx2_nix_tm_node *tm_node;
+       int rc;
+
+       /* 0..sq_cnt-1 are leaf nodes */
+       if (sq >= dev->tm_leaf_cnt)
+               return -EINVAL;
+
+       /* Search for internal node first */
+       tm_node = nix_tm_node_search(dev, sq, false);
+       if (!tm_node)
+               tm_node = nix_tm_node_search(dev, sq, true);
+
+       /* Check if we found a valid leaf node */
+       if (!tm_node || tm_node->level_id != OTX2_TM_LVL_QUEUE ||
+           !tm_node->parent || tm_node->parent->hw_id == UINT32_MAX) {
+               return -EIO;
+       }
+
+       /* Get SMQ Id of leaf node's parent */
+       *smq = tm_node->parent->hw_id;
+       *rr_quantum = (tm_node->weight * NIX_TM_RR_QUANTUM_MAX)
+               / MAX_SCHED_WEIGHT;
+
+       rc = nix_smq_xoff(dev, *smq, false);
+       if (rc)
+               return rc;
+       tm_node->flags |= NIX_TM_NODE_ENABLED;
+
+       return 0;
+}