common/cnxk: add NIX TM hierarchy enable/disable
Author:     Nithin Dabilpuram <ndabilpuram@marvell.com>
AuthorDate: Tue, 6 Apr 2021 14:41:28 +0000 (20:11 +0530)
Commit:     Jerin Jacob <jerinj@marvell.com>
CommitDate: Fri, 9 Apr 2021 06:32:24 +0000 (08:32 +0200)

Add support to enable or disable the TM hierarchy, allocating node
HW resources such as shapers and schedulers and configuring them to
match the user-created or default hierarchy.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
drivers/common/cnxk/roc_nix.h
drivers/common/cnxk/roc_nix_priv.h
drivers/common/cnxk/roc_nix_tm.c
drivers/common/cnxk/roc_nix_tm_ops.c
drivers/common/cnxk/roc_nix_tm_utils.c
drivers/common/cnxk/version.map

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 52e001c..7bf3435 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -391,6 +391,14 @@ roc_nix_tm_shaper_profile_get(struct roc_nix *roc_nix, uint32_t profile_id);
 struct roc_nix_tm_shaper_profile *__roc_api roc_nix_tm_shaper_profile_next(
        struct roc_nix *roc_nix, struct roc_nix_tm_shaper_profile *__prev);
 
+/*
+ * TM hierarchy enable/disable API.
+ */
+int __roc_api roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_hierarchy_enable(struct roc_nix *roc_nix,
+                                         enum roc_nix_tm_tree tree,
+                                         bool xmit_enable);
+
 /*
  * TM utilities API.
  */
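
A minimal usage sketch of the two new calls, assuming an already
initialized struct roc_nix pointer and the ROC_NIX_TM_USER tree; the
helper name is illustrative and this is not part of the diff itself:

static int
tm_hierarchy_example(struct roc_nix *roc_nix)
{
	int rc;

	/* Allocate and program HW resources for the user tree and
	 * start transmission on all SQs.
	 */
	rc = roc_nix_tm_hierarchy_enable(roc_nix, ROC_NIX_TM_USER, true);
	if (rc)
		return rc;

	/* ... transmit traffic ... */

	/* Drain SQs, flush SMQs and tear the hierarchy down. */
	return roc_nix_tm_hierarchy_disable(roc_nix);
}
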
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 9384bdd..5d54bd2 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -340,7 +340,10 @@ int nix_tm_release_resources(struct nix *nix, uint8_t hw_lvl, bool contig,
                             bool above_thresh);
 void nix_tm_copy_rsp_to_nix(struct nix *nix, struct nix_txsch_alloc_rsp *rsp);
 
+int nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree);
 int nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree);
+int nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
+                        bool rr_quantum_only);
 
 /*
  * TM priv utils.
@@ -369,6 +372,19 @@ bool nix_tm_child_res_valid(struct nix_tm_node_list *list,
                            struct nix_tm_node *parent);
 uint16_t nix_tm_resource_estimate(struct nix *nix, uint16_t *schq_contig,
                                  uint16_t *schq, enum roc_nix_tm_tree tree);
+uint8_t nix_tm_tl1_default_prep(uint32_t schq, volatile uint64_t *reg,
+                               volatile uint64_t *regval);
+uint8_t nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
+                                volatile uint64_t *reg,
+                                volatile uint64_t *regval,
+                                volatile uint64_t *regval_mask);
+uint8_t nix_tm_sched_reg_prep(struct nix *nix, struct nix_tm_node *node,
+                             volatile uint64_t *reg,
+                             volatile uint64_t *regval);
+uint8_t nix_tm_shaper_reg_prep(struct nix_tm_node *node,
+                              struct nix_tm_shaper_profile *profile,
+                              volatile uint64_t *reg,
+                              volatile uint64_t *regval);
 struct nix_tm_node *nix_tm_node_alloc(void);
 void nix_tm_node_free(struct nix_tm_node *node);
 struct nix_tm_shaper_profile *nix_tm_shaper_profile_alloc(void);
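
The prototypes above share a convention: each *_reg_prep() helper fills
(reg, regval) entries from the caller-supplied position and returns the
number of entries written, so topology, schedule and shaper settings
for one node can be batched into a single nix_txschq_config mailbox
message. A minimal sketch of that accumulation, mirroring
nix_tm_node_reg_conf() added below:

uint64_t reg[MAX_REGS_PER_MBOX_MSG];
uint64_t regval[MAX_REGS_PER_MBOX_MSG];
uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
uint8_t k;

/* Each helper returns how many register slots it consumed. */
k = nix_tm_topology_reg_prep(nix, node, reg, regval, regval_mask);
k += nix_tm_sched_reg_prep(nix, node, &reg[k], &regval[k]);
k += nix_tm_shaper_reg_prep(node, profile, &reg[k], &regval[k]);
/* "k" becomes req->num_regs of one nix_txschq_config request. */
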
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index 581de4b..762c85a 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -30,6 +30,93 @@ nix_tm_clear_shaper_profiles(struct nix *nix)
        }
 }
 
+static int
+nix_tm_node_reg_conf(struct nix *nix, struct nix_tm_node *node)
+{
+       uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
+       uint64_t regval[MAX_REGS_PER_MBOX_MSG];
+       struct nix_tm_shaper_profile *profile;
+       uint64_t reg[MAX_REGS_PER_MBOX_MSG];
+       struct mbox *mbox = (&nix->dev)->mbox;
+       struct nix_txschq_config *req;
+       int rc = -EFAULT;
+       uint32_t hw_lvl;
+       uint8_t k = 0;
+
+       memset(regval, 0, sizeof(regval));
+       memset(regval_mask, 0, sizeof(regval_mask));
+
+       profile = nix_tm_shaper_profile_search(nix, node->shaper_profile_id);
+       hw_lvl = node->hw_lvl;
+
+       /* Need this trigger to configure TL1 */
+       if (!nix_tm_have_tl1_access(nix) && hw_lvl == NIX_TXSCH_LVL_TL2) {
+               /* Prepare default conf for TL1 */
+               req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+               req->lvl = NIX_TXSCH_LVL_TL1;
+
+               k = nix_tm_tl1_default_prep(node->parent_hw_id, req->reg,
+                                           req->regval);
+               req->num_regs = k;
+               rc = mbox_process(mbox);
+               if (rc)
+                       goto error;
+       }
+
+       /* Prepare topology config */
+       k = nix_tm_topology_reg_prep(nix, node, reg, regval, regval_mask);
+
+       /* Prepare schedule config */
+       k += nix_tm_sched_reg_prep(nix, node, &reg[k], &regval[k]);
+
+       /* Prepare shaping config */
+       k += nix_tm_shaper_reg_prep(node, profile, &reg[k], &regval[k]);
+
+       if (!k)
+               return 0;
+
+       /* Copy and send config mbox */
+       req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+       req->lvl = hw_lvl;
+       req->num_regs = k;
+
+       mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
+       mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
+       mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);
+
+       rc = mbox_process(mbox);
+       if (rc)
+               goto error;
+
+       return 0;
+error:
+       plt_err("Txschq conf failed for node %p, rc=%d", node, rc);
+       return rc;
+}
+
+int
+nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
+{
+       struct nix_tm_node_list *list;
+       struct nix_tm_node *node;
+       uint32_t hw_lvl;
+       int rc = 0;
+
+       list = nix_tm_node_list(nix, tree);
+
+       for (hw_lvl = 0; hw_lvl <= nix->tm_root_lvl; hw_lvl++) {
+               TAILQ_FOREACH(node, list, node) {
+                       if (node->hw_lvl != hw_lvl)
+                               continue;
+                       rc = nix_tm_node_reg_conf(nix, node);
+                       if (rc)
+                               goto exit;
+               }
+       }
+exit:
+       return rc;
+}
+
 int
 nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree)
 {
@@ -477,6 +564,66 @@ nix_tm_sq_flush_post(struct roc_nix_sq *sq)
        return 0;
 }
 
+int
+nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
+                    bool rr_quantum_only)
+{
+       struct mbox *mbox = (&nix->dev)->mbox;
+       uint16_t qid = node->id, smq;
+       uint64_t rr_quantum;
+       int rc;
+
+       smq = node->parent->hw_id;
+       rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
+
+       if (rr_quantum_only)
+               plt_tm_dbg("Update sq(%u) rr_quantum 0x%" PRIx64, qid,
+                          rr_quantum);
+       else
+               plt_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%" PRIx64,
+                          qid, smq, rr_quantum);
+
+       if (qid > nix->nb_tx_queues)
+               return -EFAULT;
+
+       if (roc_model_is_cn9k()) {
+               struct nix_aq_enq_req *aq;
+
+               aq = mbox_alloc_msg_nix_aq_enq(mbox);
+               aq->qidx = qid;
+               aq->ctype = NIX_AQ_CTYPE_SQ;
+               aq->op = NIX_AQ_INSTOP_WRITE;
+
+               /* smq update only when needed */
+               if (!rr_quantum_only) {
+                       aq->sq.smq = smq;
+                       aq->sq_mask.smq = ~aq->sq_mask.smq;
+               }
+               aq->sq.smq_rr_quantum = rr_quantum;
+               aq->sq_mask.smq_rr_quantum = ~aq->sq_mask.smq_rr_quantum;
+       } else {
+               struct nix_cn10k_aq_enq_req *aq;
+
+               aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+               aq->qidx = qid;
+               aq->ctype = NIX_AQ_CTYPE_SQ;
+               aq->op = NIX_AQ_INSTOP_WRITE;
+
+               /* smq update only when needed */
+               if (!rr_quantum_only) {
+                       aq->sq.smq = smq;
+                       aq->sq_mask.smq = ~aq->sq_mask.smq;
+               }
+               aq->sq.smq_rr_weight = rr_quantum;
+               aq->sq_mask.smq_rr_weight = ~aq->sq_mask.smq_rr_weight;
+       }
+
+       rc = mbox_process(mbox);
+       if (rc)
+               plt_err("Failed to set smq, rc=%d", rc);
+       return rc;
+}
+
 int
 nix_tm_release_resources(struct nix *nix, uint8_t hw_lvl, bool contig,
                         bool above_thresh)
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 1e952c4..6bb0766 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -309,3 +309,237 @@ roc_nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id, bool free)
 {
        return nix_tm_node_delete(roc_nix, node_id, ROC_NIX_TM_USER, free);
 }
+
+int
+roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
+{
+       struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+       uint16_t sqb_cnt, head_off, tail_off;
+       uint16_t sq_cnt = nix->nb_tx_queues;
+       struct mbox *mbox = (&nix->dev)->mbox;
+       struct nix_tm_node_list *list;
+       enum roc_nix_tm_tree tree;
+       struct nix_tm_node *node;
+       struct roc_nix_sq *sq;
+       uint64_t wdata, val;
+       uintptr_t regaddr;
+       int rc = -1, i;
+
+       if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
+               return 0;
+
+       plt_tm_dbg("Disabling hierarchy on %s", nix->pci_dev->name);
+
+       tree = nix->tm_tree;
+       list = nix_tm_node_list(nix, tree);
+
+       /* Enable CGX RXTX to drain pkts */
+       if (!roc_nix->io_enabled) {
+               /* Though this enables both RX MCAM entries and the CGX link,
+                * we assume all Rx queues were stopped well before this point.
+                */
+               mbox_alloc_msg_nix_lf_start_rx(mbox);
+               rc = mbox_process(mbox);
+               if (rc) {
+                       plt_err("cgx start failed, rc=%d", rc);
+                       return rc;
+               }
+       }
+
+       /* XON all SMQs */
+       TAILQ_FOREACH(node, list, node) {
+               if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+                       continue;
+               if (!(node->flags & NIX_TM_NODE_HWRES))
+                       continue;
+
+               rc = nix_tm_smq_xoff(nix, node, false);
+               if (rc) {
+                       plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
+                               rc);
+                       goto cleanup;
+               }
+       }
+
+       /* Flush all tx queues */
+       for (i = 0; i < sq_cnt; i++) {
+               sq = nix->sqs[i];
+               if (!sq)
+                       continue;
+
+               rc = roc_nix_tm_sq_aura_fc(sq, false);
+               if (rc) {
+                       plt_err("Failed to disable sqb aura fc, rc=%d", rc);
+                       goto cleanup;
+               }
+
+               /* Wait for sq entries to be flushed */
+               rc = roc_nix_tm_sq_flush_spin(sq);
+               if (rc) {
+                       plt_err("Failed to drain sq, rc=%d\n", rc);
+                       goto cleanup;
+               }
+       }
+
+       /* XOFF & Flush all SMQ's. HRM mandates
+        * all SQ's empty before SMQ flush is issued.
+        */
+       TAILQ_FOREACH(node, list, node) {
+               if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+                       continue;
+               if (!(node->flags & NIX_TM_NODE_HWRES))
+                       continue;
+
+               rc = nix_tm_smq_xoff(nix, node, true);
+               if (rc) {
+                       plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
+                               rc);
+                       goto cleanup;
+               }
+
+               node->flags &= ~NIX_TM_NODE_ENABLED;
+       }
+
+       /* Verify sanity of all tx queues */
+       for (i = 0; i < sq_cnt; i++) {
+               sq = nix->sqs[i];
+               if (!sq)
+                       continue;
+
+               wdata = ((uint64_t)sq->qid << 32);
+               regaddr = nix->base + NIX_LF_SQ_OP_STATUS;
+               val = roc_atomic64_add_nosync(wdata, (int64_t *)regaddr);
+
+               sqb_cnt = val & 0xFFFF;
+               head_off = (val >> 20) & 0x3F;
+               tail_off = (val >> 28) & 0x3F;
+
+               if (sqb_cnt > 1 || head_off != tail_off ||
+                   (*(uint64_t *)sq->fc != sq->nb_sqb_bufs))
+                       plt_err("Failed to gracefully flush sq %u", sq->qid);
+       }
+
+       nix->tm_flags &= ~NIX_TM_HIERARCHY_ENA;
+cleanup:
+       /* Restore cgx state */
+       if (!roc_nix->io_enabled) {
+               mbox_alloc_msg_nix_lf_stop_rx(mbox);
+               rc |= mbox_process(mbox);
+       }
+       return rc;
+}
+
+int
+roc_nix_tm_hierarchy_enable(struct roc_nix *roc_nix, enum roc_nix_tm_tree tree,
+                           bool xmit_enable)
+{
+       struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+       struct nix_tm_node_list *list;
+       struct nix_tm_node *node;
+       struct roc_nix_sq *sq;
+       uint32_t tree_mask;
+       uint16_t sq_id;
+       int rc;
+
+       if (tree >= ROC_NIX_TM_TREE_MAX)
+               return NIX_ERR_PARAM;
+
+       if (nix->tm_flags & NIX_TM_HIERARCHY_ENA) {
+               if (nix->tm_tree != tree)
+                       return -EBUSY;
+               return 0;
+       }
+
+       plt_tm_dbg("Enabling hierarchy on %s, xmit_ena %u, tree %u",
+                  nix->pci_dev->name, xmit_enable, tree);
+
+       /* Free hw resources of other trees */
+       tree_mask = NIX_TM_TREE_MASK_ALL;
+       tree_mask &= ~BIT(tree);
+
+       rc = nix_tm_free_resources(roc_nix, tree_mask, true);
+       if (rc) {
+               plt_err("failed to free resources of other trees, rc=%d", rc);
+               return rc;
+       }
+
+       /* Update active tree before starting to do anything */
+       nix->tm_tree = tree;
+
+       nix_tm_update_parent_info(nix, tree);
+
+       rc = nix_tm_alloc_txschq(nix, tree);
+       if (rc) {
+               plt_err("TM failed to alloc tm resources=%d", rc);
+               return rc;
+       }
+
+       rc = nix_tm_assign_resources(nix, tree);
+       if (rc) {
+               plt_err("TM failed to assign tm resources=%d", rc);
+               return rc;
+       }
+
+       rc = nix_tm_txsch_reg_config(nix, tree);
+       if (rc) {
+               plt_err("TM failed to configure sched registers=%d", rc);
+               return rc;
+       }
+
+       list = nix_tm_node_list(nix, tree);
+       /* Mark all non-leaf nodes as enabled */
+       TAILQ_FOREACH(node, list, node) {
+               if (!nix_tm_is_leaf(nix, node->lvl))
+                       node->flags |= NIX_TM_NODE_ENABLED;
+       }
+
+       if (!xmit_enable)
+               goto skip_sq_update;
+
+       /* Update SQ Sched Data while SQ is idle */
+       TAILQ_FOREACH(node, list, node) {
+               if (!nix_tm_is_leaf(nix, node->lvl))
+                       continue;
+
+               rc = nix_tm_sq_sched_conf(nix, node, false);
+               if (rc) {
+                       plt_err("SQ %u sched update failed, rc=%d", node->id,
+                               rc);
+                       return rc;
+               }
+       }
+
+       /* Finally, XON all SMQs */
+       TAILQ_FOREACH(node, list, node) {
+               if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+                       continue;
+
+               rc = nix_tm_smq_xoff(nix, node, false);
+               if (rc) {
+                       plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
+                               rc);
+                       return rc;
+               }
+       }
+
+       /* Enable xmit as all the topology is ready */
+       TAILQ_FOREACH(node, list, node) {
+               if (!nix_tm_is_leaf(nix, node->lvl))
+                       continue;
+
+               sq_id = node->id;
+               sq = nix->sqs[sq_id];
+
+               rc = roc_nix_tm_sq_aura_fc(sq, true);
+               if (rc) {
+                       plt_err("TM sw xon failed on SQ %u, rc=%d", node->id,
+                               rc);
+                       return rc;
+               }
+               node->flags |= NIX_TM_NODE_ENABLED;
+       }
+
+skip_sq_update:
+       nix->tm_flags |= NIX_TM_HIERARCHY_ENA;
+       return 0;
+}
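
The sanity loop in roc_nix_tm_hierarchy_disable() above reads
NIX_LF_SQ_OP_STATUS with an atomic add, passing the qid in bits 63:32
of wdata, and then decodes the returned value. The same decode as a
standalone sketch (the helper name is hypothetical; field offsets are
taken verbatim from the code above):

/* An SQ counts as gracefully flushed when at most one SQB remains,
 * head equals tail, and the FC memory shows all nb_sqb_bufs returned.
 */
static bool
sq_is_drained(uint64_t val, const struct roc_nix_sq *sq)
{
	uint16_t sqb_cnt = val & 0xFFFF;
	uint16_t head_off = (val >> 20) & 0x3F;
	uint16_t tail_off = (val >> 28) & 0x3F;

	return sqb_cnt <= 1 && head_off == tail_off &&
	       *(uint64_t *)sq->fc == sq->nb_sqb_bufs;
}
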
diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index 45de9f6..b644716 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -5,6 +5,14 @@
 #include "roc_api.h"
 #include "roc_priv.h"
 
+static inline uint64_t
+nix_tm_shaper2regval(struct nix_tm_shaper_data *shaper)
+{
+       return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
+              (shaper->div_exp << 13) | (shaper->exponent << 9) |
+              (shaper->mantissa << 1);
+}
+
 uint16_t
 nix_tm_lvl2nix_tl1_root(uint32_t lvl)
 {
@@ -50,6 +58,32 @@ nix_tm_lvl2nix(struct nix *nix, uint32_t lvl)
                return nix_tm_lvl2nix_tl2_root(lvl);
 }
 
+static uint8_t
+nix_tm_relchan_get(struct nix *nix)
+{
+       return nix->tx_chan_base & 0xff;
+}
+
+static int
+nix_tm_find_prio_anchor(struct nix *nix, uint32_t node_id,
+                       enum roc_nix_tm_tree tree)
+{
+       struct nix_tm_node *child_node;
+       struct nix_tm_node_list *list;
+
+       list = nix_tm_node_list(nix, tree);
+
+       TAILQ_FOREACH(child_node, list, node) {
+               if (!child_node->parent)
+                       continue;
+               if (child_node->parent->id != node_id)
+                       continue;
+               if (child_node->priority == child_node->parent->rr_prio)
+                       continue;
+               return child_node->hw_id - child_node->priority;
+       }
+       return 0;
+}
 
 struct nix_tm_shaper_profile *
 nix_tm_shaper_profile_search(struct nix *nix, uint32_t id)
@@ -177,6 +211,39 @@ nix_tm_shaper_burst_conv(uint64_t value, uint64_t *exponent_p,
        return NIX_TM_SHAPER_BURST(exponent, mantissa);
 }
 
+static void
+nix_tm_shaper_conf_get(struct nix_tm_shaper_profile *profile,
+                      struct nix_tm_shaper_data *cir,
+                      struct nix_tm_shaper_data *pir)
+{
+       if (!profile)
+               return;
+
+       /* Calculate CIR exponent and mantissa */
+       if (profile->commit.rate)
+               cir->rate = nix_tm_shaper_rate_conv(
+                       profile->commit.rate, &cir->exponent, &cir->mantissa,
+                       &cir->div_exp);
+
+       /* Calculate PIR exponent and mantissa */
+       if (profile->peak.rate)
+               pir->rate = nix_tm_shaper_rate_conv(
+                       profile->peak.rate, &pir->exponent, &pir->mantissa,
+                       &pir->div_exp);
+
+       /* Calculate CIR burst exponent and mantissa */
+       if (profile->commit.size)
+               cir->burst = nix_tm_shaper_burst_conv(profile->commit.size,
+                                                     &cir->burst_exponent,
+                                                     &cir->burst_mantissa);
+
+       /* Calculate PIR burst exponent and mantissa */
+       if (profile->peak.size)
+               pir->burst = nix_tm_shaper_burst_conv(profile->peak.size,
+                                                     &pir->burst_exponent,
+                                                     &pir->burst_mantissa);
+}
+
 uint32_t
 nix_tm_check_rr(struct nix *nix, uint32_t parent_id, enum roc_nix_tm_tree tree,
                uint32_t *rr_prio, uint32_t *max_prio)
@@ -308,6 +375,349 @@ nix_tm_child_res_valid(struct nix_tm_node_list *list,
        return true;
 }
 
+uint8_t
+nix_tm_tl1_default_prep(uint32_t schq, volatile uint64_t *reg,
+                       volatile uint64_t *regval)
+{
+       uint8_t k = 0;
+
+       /*
+        * Default config for TL1.
+        * For VF this is always ignored.
+        */
+       plt_tm_dbg("Default config for main root %s(%u)",
+                  nix_tm_hwlvl2str(NIX_TXSCH_LVL_TL1), schq);
+
+       /* Set DWRR quantum */
+       reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
+       regval[k] = NIX_TM_TL1_DFLT_RR_QTM;
+       k++;
+
+       reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
+       regval[k] = (NIX_TM_TL1_DFLT_RR_PRIO << 1);
+       k++;
+
+       reg[k] = NIX_AF_TL1X_CIR(schq);
+       regval[k] = 0;
+       k++;
+
+       return k;
+}
+
+uint8_t
+nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
+                        volatile uint64_t *reg, volatile uint64_t *regval,
+                        volatile uint64_t *regval_mask)
+{
+       uint8_t k = 0, hw_lvl, parent_lvl;
+       uint64_t parent = 0, child = 0;
+       enum roc_nix_tm_tree tree;
+       uint32_t rr_prio, schq;
+       uint16_t link, relchan;
+
+       tree = node->tree;
+       schq = node->hw_id;
+       hw_lvl = node->hw_lvl;
+       parent_lvl = hw_lvl + 1;
+       rr_prio = node->rr_prio;
+
+       /* Root node will not have a parent node */
+       if (hw_lvl == nix->tm_root_lvl)
+               parent = node->parent_hw_id;
+       else
+               parent = node->parent->hw_id;
+
+       link = nix->tx_link;
+       relchan = nix_tm_relchan_get(nix);
+
+       if (hw_lvl != NIX_TXSCH_LVL_SMQ)
+               child = nix_tm_find_prio_anchor(nix, node->id, tree);
+
+       /* Override default rr_prio when TL1
+        * Static Priority is disabled
+        */
+       if (hw_lvl == NIX_TXSCH_LVL_TL1 && nix->tm_flags & NIX_TM_TL1_NO_SP) {
+               rr_prio = NIX_TM_TL1_DFLT_RR_PRIO;
+               child = 0;
+       }
+
+       plt_tm_dbg("Topology config node %s(%u)->%s(%" PRIu64 ") lvl %u, id %u"
+                  " prio_anchor %" PRIu64 " rr_prio %u (%p)",
+                  nix_tm_hwlvl2str(hw_lvl), schq, nix_tm_hwlvl2str(parent_lvl),
+                  parent, node->lvl, node->id, child, rr_prio, node);
+
+       /* Prepare Topology and Link config */
+       switch (hw_lvl) {
+       case NIX_TXSCH_LVL_SMQ:
+
+               /* Set xoff which will be cleared later */
+               reg[k] = NIX_AF_SMQX_CFG(schq);
+               regval[k] = (BIT_ULL(50) | NIX_MIN_HW_FRS |
+                            ((nix->mtu & 0xFFFF) << 8));
+               regval_mask[k] =
+                       ~(BIT_ULL(50) | GENMASK_ULL(6, 0) | GENMASK_ULL(23, 8));
+               k++;
+
+               /* Parent and schedule conf */
+               reg[k] = NIX_AF_MDQX_PARENT(schq);
+               regval[k] = parent << 16;
+               k++;
+
+               break;
+       case NIX_TXSCH_LVL_TL4:
+               /* Parent and schedule conf */
+               reg[k] = NIX_AF_TL4X_PARENT(schq);
+               regval[k] = parent << 16;
+               k++;
+
+               reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
+               regval[k] = (child << 32) | (rr_prio << 1);
+               k++;
+
+               /* Configure TL4 to send to SDP channel instead of CGX/LBK */
+               if (nix->sdp_link) {
+                       reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
+                       regval[k] = BIT_ULL(12);
+                       k++;
+               }
+               break;
+       case NIX_TXSCH_LVL_TL3:
+               /* Parent and schedule conf */
+               reg[k] = NIX_AF_TL3X_PARENT(schq);
+               regval[k] = parent << 16;
+               k++;
+
+               reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
+               regval[k] = (child << 32) | (rr_prio << 1);
+               k++;
+
+               /* Link configuration */
+               if (!nix->sdp_link &&
+                   nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
+                       reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
+                       regval[k] = BIT_ULL(12) | relchan;
+                       k++;
+               }
+
+               break;
+       case NIX_TXSCH_LVL_TL2:
+               /* Parent and schedule conf */
+               reg[k] = NIX_AF_TL2X_PARENT(schq);
+               regval[k] = parent << 16;
+               k++;
+
+               reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
+               regval[k] = (child << 32) | (rr_prio << 1);
+               k++;
+
+               /* Link configuration */
+               if (!nix->sdp_link &&
+                   nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
+                       reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
+                       regval[k] = BIT_ULL(12) | relchan;
+                       k++;
+               }
+
+               break;
+       case NIX_TXSCH_LVL_TL1:
+               reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
+               regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
+               k++;
+
+               break;
+       }
+
+       return k;
+}
+
+uint8_t
+nix_tm_sched_reg_prep(struct nix *nix, struct nix_tm_node *node,
+                     volatile uint64_t *reg, volatile uint64_t *regval)
+{
+       uint64_t strict_prio = node->priority;
+       uint32_t hw_lvl = node->hw_lvl;
+       uint32_t schq = node->hw_id;
+       uint64_t rr_quantum;
+       uint8_t k = 0;
+
+       rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
+
+       /* For children to root, strict prio is default if either
+        * device root is TL2 or TL1 Static Priority is disabled.
+        */
+       if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
+           (!nix_tm_have_tl1_access(nix) || nix->tm_flags & NIX_TM_TL1_NO_SP))
+               strict_prio = NIX_TM_TL1_DFLT_RR_PRIO;
+
+       plt_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
+                  "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
+                  nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
+                  strict_prio, rr_quantum, node);
+
+       switch (hw_lvl) {
+       case NIX_TXSCH_LVL_SMQ:
+               reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
+               regval[k] = (strict_prio << 24) | rr_quantum;
+               k++;
+
+               break;
+       case NIX_TXSCH_LVL_TL4:
+               reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
+               regval[k] = (strict_prio << 24) | rr_quantum;
+               k++;
+
+               break;
+       case NIX_TXSCH_LVL_TL3:
+               reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
+               regval[k] = (strict_prio << 24) | rr_quantum;
+               k++;
+
+               break;
+       case NIX_TXSCH_LVL_TL2:
+               reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
+               regval[k] = (strict_prio << 24) | rr_quantum;
+               k++;
+
+               break;
+       case NIX_TXSCH_LVL_TL1:
+               reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
+               regval[k] = rr_quantum;
+               k++;
+
+               break;
+       }
+
+       return k;
+}
+
+uint8_t
+nix_tm_shaper_reg_prep(struct nix_tm_node *node,
+                      struct nix_tm_shaper_profile *profile,
+                      volatile uint64_t *reg, volatile uint64_t *regval)
+{
+       struct nix_tm_shaper_data cir, pir;
+       uint32_t schq = node->hw_id;
+       uint64_t adjust = 0;
+       uint8_t k = 0;
+
+       memset(&cir, 0, sizeof(cir));
+       memset(&pir, 0, sizeof(pir));
+       nix_tm_shaper_conf_get(profile, &cir, &pir);
+
+       if (node->pkt_mode)
+               adjust = 1;
+       else if (profile)
+               adjust = profile->pkt_len_adj;
+
+       plt_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
+                  "pir %" PRIu64 "(%" PRIu64 "B),"
+                  " cir %" PRIu64 "(%" PRIu64 "B)"
+                  "adjust 0x%" PRIx64 "(pktmode %u) (%p)",
+                  nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
+                  pir.rate, pir.burst, cir.rate, cir.burst, adjust,
+                  node->pkt_mode, node);
+
+       switch (node->hw_lvl) {
+       case NIX_TXSCH_LVL_SMQ:
+               /* Configure PIR, CIR */
+               reg[k] = NIX_AF_MDQX_PIR(schq);
+               regval[k] = (pir.rate && pir.burst) ?
+                                         (nix_tm_shaper2regval(&pir) | 1) :
+                                         0;
+               k++;
+
+               reg[k] = NIX_AF_MDQX_CIR(schq);
+               regval[k] = (cir.rate && cir.burst) ?
+                                         (nix_tm_shaper2regval(&cir) | 1) :
+                                         0;
+               k++;
+
+               /* Configure RED algo */
+               reg[k] = NIX_AF_MDQX_SHAPE(schq);
+               regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
+                            (uint64_t)node->pkt_mode << 24);
+               k++;
+               break;
+       case NIX_TXSCH_LVL_TL4:
+               /* Configure PIR, CIR */
+               reg[k] = NIX_AF_TL4X_PIR(schq);
+               regval[k] = (pir.rate && pir.burst) ?
+                                         (nix_tm_shaper2regval(&pir) | 1) :
+                                         0;
+               k++;
+
+               reg[k] = NIX_AF_TL4X_CIR(schq);
+               regval[k] = (cir.rate && cir.burst) ?
+                                         (nix_tm_shaper2regval(&cir) | 1) :
+                                         0;
+               k++;
+
+               /* Configure RED algo */
+               reg[k] = NIX_AF_TL4X_SHAPE(schq);
+               regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
+                            (uint64_t)node->pkt_mode << 24);
+               k++;
+               break;
+       case NIX_TXSCH_LVL_TL3:
+               /* Configure PIR, CIR */
+               reg[k] = NIX_AF_TL3X_PIR(schq);
+               regval[k] = (pir.rate && pir.burst) ?
+                                         (nix_tm_shaper2regval(&pir) | 1) :
+                                         0;
+               k++;
+
+               reg[k] = NIX_AF_TL3X_CIR(schq);
+               regval[k] = (cir.rate && cir.burst) ?
+                                         (nix_tm_shaper2regval(&cir) | 1) :
+                                         0;
+               k++;
+
+               /* Configure RED algo */
+               reg[k] = NIX_AF_TL3X_SHAPE(schq);
+               regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
+                            (uint64_t)node->pkt_mode << 24);
+               k++;
+
+               break;
+       case NIX_TXSCH_LVL_TL2:
+               /* Configure PIR, CIR */
+               reg[k] = NIX_AF_TL2X_PIR(schq);
+               regval[k] = (pir.rate && pir.burst) ?
+                                         (nix_tm_shaper2regval(&pir) | 1) :
+                                         0;
+               k++;
+
+               reg[k] = NIX_AF_TL2X_CIR(schq);
+               regval[k] = (cir.rate && cir.burst) ?
+                                         (nix_tm_shaper2regval(&cir) | 1) :
+                                         0;
+               k++;
+
+               /* Configure RED algo */
+               reg[k] = NIX_AF_TL2X_SHAPE(schq);
+               regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
+                            (uint64_t)node->pkt_mode << 24);
+               k++;
+
+               break;
+       case NIX_TXSCH_LVL_TL1:
+               /* Configure CIR */
+               reg[k] = NIX_AF_TL1X_CIR(schq);
+               regval[k] = (cir.rate && cir.burst) ?
+                                         (nix_tm_shaper2regval(&cir) | 1) :
+                                         0;
+               k++;
+
+               /* Configure length disable and adjust */
+               reg[k] = NIX_AF_TL1X_SHAPE(schq);
+               regval[k] = (adjust | (uint64_t)node->pkt_mode << 24);
+               k++;
+               break;
+       }
+
+       return k;
+}
+
 uint8_t
 nix_tm_sw_xoff_prep(struct nix_tm_node *node, bool enable,
                    volatile uint64_t *reg, volatile uint64_t *regval)
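
nix_tm_shaper2regval() above packs the converted shaper parameters into
a PIR/CIR register value: burst_exponent at bit 37, burst_mantissa at
bit 29, div_exp at bit 13, exponent at bit 9 and mantissa at bit 1,
leaving bit 0 for the enable flag that the callers OR in. A worked
example with purely illustrative field values (not derived from a real
rate):

struct nix_tm_shaper_data pir = {
	.burst_exponent = 0xf,
	.burst_mantissa = 0x80,
	.div_exp = 0,
	.exponent = 0xa,
	.mantissa = 0x55,
};

/* Expands to (0xfULL << 37) | (0x80ULL << 29) | (0xaULL << 9) |
 * (0x55ULL << 1); OR-ing in 1 sets the enable bit, exactly as the
 * PIR/CIR cases above do with "nix_tm_shaper2regval(&pir) | 1".
 */
uint64_t regval = nix_tm_shaper2regval(&pir) | 1;
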
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 3c79023..2a1cc98 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -105,6 +105,8 @@ INTERNAL {
        roc_nix_switch_hdr_set;
        roc_nix_eeprom_info_get;
        roc_nix_tm_free_resources;
+       roc_nix_tm_hierarchy_disable;
+       roc_nix_tm_hierarchy_enable;
        roc_nix_tm_node_add;
        roc_nix_tm_node_delete;
        roc_nix_tm_node_get;