void
nix_tm_clear_shaper_profiles(struct nix *nix)
{
- struct nix_tm_shaper_profile *shaper_profile;
+ struct nix_tm_shaper_profile *shaper_profile, *tmp;
+ struct nix_tm_shaper_profile_list *list;
- shaper_profile = TAILQ_FIRST(&nix->shaper_profile_list);
- while (shaper_profile != NULL) {
+ list = &nix->shaper_profile_list;
+ PLT_TAILQ_FOREACH_SAFE(shaper_profile, list, shaper, tmp) {
if (shaper_profile->ref_cnt)
plt_warn("Shaper profile %u has non zero references",
shaper_profile->id);
TAILQ_REMOVE(&nix->shaper_profile_list, shaper_profile, shaper);
nix_tm_shaper_profile_free(shaper_profile);
- shaper_profile = TAILQ_FIRST(&nix->shaper_profile_list);
}
}
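+/* Push topology, schedule and shaper register values for a single
+ * TM node to the AF via the txschq config mailbox.
+ */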
+static int
+nix_tm_node_reg_conf(struct nix *nix, struct nix_tm_node *node)
+{
+ uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
+ uint64_t regval[MAX_REGS_PER_MBOX_MSG];
+ struct nix_tm_shaper_profile *profile;
+ uint64_t reg[MAX_REGS_PER_MBOX_MSG];
+ struct mbox *mbox = (&nix->dev)->mbox;
+ struct nix_txschq_config *req;
+ int rc = -EFAULT;
+ uint32_t hw_lvl;
+ uint8_t k = 0;
+
+ memset(regval, 0, sizeof(regval));
+ memset(regval_mask, 0, sizeof(regval_mask));
+
+ profile = nix_tm_shaper_profile_search(nix, node->shaper_profile_id);
+ hw_lvl = node->hw_lvl;
+
+ /* Need this trigger to configure TL1 */
+ if (!nix_tm_have_tl1_access(nix) && hw_lvl == NIX_TXSCH_LVL_TL2) {
+ /* Prepare default conf for TL1 */
+ req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->lvl = NIX_TXSCH_LVL_TL1;
+
+ k = nix_tm_tl1_default_prep(nix, node->parent_hw_id, req->reg,
+ req->regval);
+ req->num_regs = k;
+ rc = mbox_process(mbox);
+ if (rc)
+ goto error;
+ }
+
+ /* Prepare topology config */
+ k = nix_tm_topology_reg_prep(nix, node, reg, regval, regval_mask);
+
+ /* Prepare schedule config */
+ k += nix_tm_sched_reg_prep(nix, node, &reg[k], &regval[k]);
+
+ /* Prepare shaping config */
+ k += nix_tm_shaper_reg_prep(node, profile, &reg[k], &regval[k]);
+
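+ /* Nothing to program for this node */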
+ if (!k)
+ return 0;
+
+ /* Copy and send config mbox */
+ req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->lvl = hw_lvl;
+ req->num_regs = k;
+
+ mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
+ mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
+ mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);
+
+ rc = mbox_process(mbox);
+ if (rc)
+ goto error;
+
+ return 0;
+error:
+ plt_err("Txschq conf failed for node %p, rc=%d", node, rc);
+ return rc;
+}
+
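+/* Program txschq registers for all nodes of the given tree, and mark
+ * BP-capable nodes at the link config level along the way.
+ */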
+int
+nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
+{
+ struct nix_tm_node_list *list;
+ bool is_pf_or_lbk = false;
+ struct nix_tm_node *node;
+ bool skip_bp = false;
+ uint32_t hw_lvl;
+ int rc = 0;
+
+ list = nix_tm_node_list(nix, tree);
+
+ if ((!dev_is_vf(&nix->dev) || nix->lbk_link) && !nix->sdp_link)
+ is_pf_or_lbk = true;
+
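+ /* Program nodes level by level, from the lowest hw level (SMQ)
+ * up to the root level.
+ */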
+ for (hw_lvl = 0; hw_lvl <= nix->tm_root_lvl; hw_lvl++) {
+ TAILQ_FOREACH(node, list, node) {
+ if (node->hw_lvl != hw_lvl)
+ continue;
+
+ /* Only one TL3/TL2 link config should have BP enable
+ * set per channel, and only for PF or LBK VF.
+ */
+ node->bp_capa = 0;
+ if (is_pf_or_lbk && !skip_bp &&
+ node->hw_lvl == nix->tm_link_cfg_lvl) {
+ node->bp_capa = 1;
+ skip_bp = false;
+ }
+
+ rc = nix_tm_node_reg_conf(nix, node);
+ if (rc)
+ goto exit;
+ }
+ }
+exit:
+ return rc;
+}
+
int
nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree)
{
return 0;
}
+static struct nix_tm_node *
+nix_tm_root_node_get(struct nix *nix, int tree)
+{
+ struct nix_tm_node_list *list = nix_tm_node_list(nix, tree);
+ struct nix_tm_node *tm_node;
+
+ TAILQ_FOREACH(tm_node, list, node) {
+ if (tm_node->hw_lvl == nix->tm_root_lvl)
+ return tm_node;
+ }
+
+ return NULL;
+}
+
int
nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
{
if (nix_tm_node_search(nix, node_id, tree))
return NIX_ERR_TM_NODE_EXISTS;
+ /* Check if root node exists */
+ if (hw_lvl == nix->tm_root_lvl && nix_tm_root_node_get(nix, tree))
+ return NIX_ERR_TM_NODE_EXISTS;
+
profile = nix_tm_shaper_profile_search(nix, profile_id);
if (!nix_tm_is_leaf(nix, lvl)) {
/* Check if shaper profile exists for non leaf node */
if (rc)
return rc;
- if (node->weight > ROC_NIX_TM_MAX_SCHED_WT)
+ if (node->weight > roc_nix_tm_max_sched_wt_get())
return NIX_ERR_TM_WEIGHT_EXCEED;
/* Maintain minimum weight */
return 0;
}
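+/* Enable or disable backpressure for the given SQ's traffic class.
+ * A SCH2 parent's BP channel can be owned by only one TC at a time,
+ * tracked via rel_chan.
+ */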
+int
+nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
+ bool enable)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ enum roc_nix_tm_tree tree = nix->tm_tree;
+ struct mbox *mbox = (&nix->dev)->mbox;
+ struct nix_txschq_config *req = NULL;
+ struct nix_tm_node_list *list;
+ uint16_t link = nix->tx_link;
+ struct nix_tm_node *sq_node;
+ struct nix_tm_node *parent;
+ struct nix_tm_node *node;
+ uint8_t k = 0;
+ int rc = 0;
+
+ sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
+ if (!sq_node)
+ return -ENOENT;
+
+ parent = sq_node->parent;
+ while (parent) {
+ if (parent->lvl == ROC_TM_LVL_SCH2)
+ break;
+
+ parent = parent->parent;
+ }
+ if (!parent)
+ return -ENOENT;
+
+ list = nix_tm_node_list(nix, tree);
+
+ if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
+ rc = -EINVAL;
+ goto err;
+ }
+
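+ /* Update LINKX_CFG on every BP-capable link-level node backing
+ * this SQ's SCH2 parent.
+ */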
+ TAILQ_FOREACH(node, list, node) {
+ if (node->hw_lvl != nix->tm_link_cfg_lvl)
+ continue;
+
+ if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
+ continue;
+
+ if (node->hw_id != parent->hw_id)
+ continue;
+
+ if (!req) {
+ req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->lvl = nix->tm_link_cfg_lvl;
+ k = 0;
+ }
+
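+ /* LINKX_CFG: low byte carries the BP channel/TC, bit 13 is
+ * the BP enable bit.
+ */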
+ req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
+ req->regval[k] = enable ? tc : 0;
+ req->regval[k] |= enable ? BIT_ULL(13) : 0;
+ req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
+ k++;
+
+ if (k >= MAX_REGS_PER_MBOX_MSG) {
+ req->num_regs = k;
+ rc = mbox_process(mbox);
+ if (rc)
+ goto err;
+ req = NULL;
+ }
+ }
+
+ if (req) {
+ req->num_regs = k;
+ rc = mbox_process(mbox);
+ if (rc)
+ goto err;
+ }
+
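+ /* Record which TC currently owns BP on this parent */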
+ parent->rel_chan = enable ? tc : NIX_TM_CHAN_INVALID;
+ return 0;
+err:
+ plt_err("Failed to %s bp on link %u, rc=%d(%s)",
+ enable ? "enable" : "disable", link, rc, roc_error_msg_get(rc));
+ return rc;
+}
+
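+/* Report backpressure state: enabled only when every BP-capable
+ * link-level node has its BP enable bit set.
+ */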
+int
+nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct nix_txschq_config *req = NULL, *rsp;
+ enum roc_nix_tm_tree tree = nix->tm_tree;
+ struct mbox *mbox = (&nix->dev)->mbox;
+ struct nix_tm_node_list *list;
+ struct nix_tm_node *node;
+ bool found = false;
+ uint8_t enable = 1;
+ uint8_t k = 0, i;
+ uint16_t link;
+ int rc = 0;
+
+ list = nix_tm_node_list(nix, tree);
+ link = nix->tx_link;
+
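+ /* Read LINKX_CFG on every BP-capable link-level node; enable
+ * starts at 1 and is ANDed with each node's BP enable bit.
+ */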
+ TAILQ_FOREACH(node, list, node) {
+ if (node->hw_lvl != nix->tm_link_cfg_lvl)
+ continue;
+
+ if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
+ continue;
+
+ found = true;
+ if (!req) {
+ req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->read = 1;
+ req->lvl = nix->tm_link_cfg_lvl;
+ k = 0;
+ }
+
+ req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
+ k++;
+
+ if (k >= MAX_REGS_PER_MBOX_MSG) {
+ req->num_regs = k;
+ rc = mbox_process_msg(mbox, (void **)&rsp);
+ if (rc || rsp->num_regs != k)
+ goto err;
+ req = NULL;
+
+ /* Report enabled only if BP is enabled on all nodes */
+ for (i = 0; i < k; i++)
+ enable &= !!(rsp->regval[i] & BIT_ULL(13));
+ }
+ }
+
+ if (req) {
+ req->num_regs = k;
+ rc = mbox_process_msg(mbox, (void **)&rsp);
+ if (rc)
+ goto err;
+ /* Report enabled only if BP is enabled on all nodes */
+ for (i = 0; i < k; i++)
+ enable &= !!(rsp->regval[i] & BIT_ULL(13));
+ }
+
+ *is_enabled = found ? !!enable : false;
+ return 0;
+err:
+ plt_err("Failed to get bp status on link %u, rc=%d(%s)", link, rc,
+ roc_error_msg_get(rc));
+ return rc;
+}
+
int
nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable)
{
return 0;
exit:
+ roc_nix_tm_dump(sq->roc_nix);
roc_nix_queues_ctx_dump(sq->roc_nix);
return -EFAULT;
}
struct nix_tm_node *node, *sibling;
struct nix_tm_node_list *list;
enum roc_nix_tm_tree tree;
+ struct msg_req *req;
struct mbox *mbox;
struct nix *nix;
uint16_t qid;
}
}
+ /* Disable backpressure */
+ rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
+ if (rc) {
+ plt_err("Failed to disable backpressure for flush, rc=%d", rc);
+ return rc;
+ }
+
/* Disable smq xoff for case it was enabled earlier */
rc = nix_tm_smq_xoff(nix, node->parent, false);
if (rc) {
rc);
goto cleanup;
}
+
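+ /* Sync in-flight Rx packets before continuing the flush */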
+ req = mbox_alloc_msg_nix_rx_sw_sync(mbox);
+ if (!req)
+ return -ENOSPC;
+
+ rc = mbox_process(mbox);
cleanup:
/* Restore cgx state */
if (!roc_nix->io_enabled) {
}
}
+ if (!nix->rx_pause)
+ return 0;
+
+ /* Restore backpressure */
+ rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
+ if (rc) {
+ plt_err("Failed to restore backpressure, rc=%d", rc);
+ return rc;
+ }
+
return 0;
}
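+/* Map an SQ to its parent SMQ and program its DWRR quantum; when
+ * rr_quantum_only is set, only the quantum is updated.
+ */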
+int
+nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
+ bool rr_quantum_only)
+{
+ struct mbox *mbox = (&nix->dev)->mbox;
+ uint16_t qid = node->id, smq;
+ uint64_t rr_quantum;
+ int rc;
+
+ smq = node->parent->hw_id;
+ rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
+
+ if (rr_quantum_only)
+ plt_tm_dbg("Update sq(%u) rr_quantum 0x%" PRIx64, qid,
+ rr_quantum);
+ else
+ plt_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%" PRIx64,
+ qid, smq, rr_quantum);
+
+ if (qid > nix->nb_tx_queues)
+ return -EFAULT;
+
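+ /* The SQ context field names differ between cn9k and cn10k */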
+ if (roc_model_is_cn9k()) {
+ struct nix_aq_enq_req *aq;
+
+ aq = mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ /* Update SMQ only when needed */
+ if (!rr_quantum_only) {
+ aq->sq.smq = smq;
+ aq->sq_mask.smq = ~aq->sq_mask.smq;
+ }
+ aq->sq.smq_rr_quantum = rr_quantum;
+ aq->sq_mask.smq_rr_quantum = ~aq->sq_mask.smq_rr_quantum;
+ } else {
+ struct nix_cn10k_aq_enq_req *aq;
+
+ aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+ if (!aq)
+ return -ENOSPC;
+
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ /* Update SMQ only when needed */
+ if (!rr_quantum_only) {
+ aq->sq.smq = smq;
+ aq->sq_mask.smq = ~aq->sq_mask.smq;
+ }
+ aq->sq.smq_rr_weight = rr_quantum;
+ aq->sq_mask.smq_rr_weight = ~aq->sq_mask.smq_rr_weight;
+ }
+
+ rc = mbox_process(mbox);
+ if (rc)
+ plt_err("Failed to set smq, rc=%d", rc);
+ return rc;
+}
+
int
nix_tm_release_resources(struct nix *nix, uint8_t hw_lvl, bool contig,
bool above_thresh)
} while (pend);
nix->tm_link_cfg_lvl = rsp->link_cfg_lvl;
+ nix->tm_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;
return 0;
alloc_err:
for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
return rc;
}
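+/* Default tree: ROOT -> SCH1 -> SCH2 -> SCH3 [-> SCH4] with one leaf
+ * per SQ; the extra SCH4 level exists only with TL1 access.
+ */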
+int
+nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ uint32_t nonleaf_id = nix->nb_tx_queues;
+ struct nix_tm_node *node = NULL;
+ uint8_t leaf_lvl, lvl, lvl_end;
+ uint32_t parent, i;
+ int rc = 0;
+
+ /* Add ROOT, SCH1, SCH2, SCH3, [SCH4] nodes */
+ parent = ROC_NIX_TM_NODE_ID_INVALID;
+ /* With TL1 access we have an extra level */
+ lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 :
+ ROC_TM_LVL_SCH3);
+
+ for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = lvl;
+ node->tree = ROC_NIX_TM_DEFAULT;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+ parent = nonleaf_id;
+ nonleaf_id++;
+ }
+
+ parent = nonleaf_id - 1;
+ leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
+ ROC_TM_LVL_SCH4);
+
+ /* Add leaf nodes */
+ for (i = 0; i < nix->nb_tx_queues; i++) {
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = i;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = leaf_lvl;
+ node->tree = ROC_NIX_TM_DEFAULT;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+ }
+
+ return 0;
+error:
+ nix_tm_node_free(node);
+ return rc;
+}
+
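+/* Rate-limit tree: a dedicated SMQ per SQ so that each queue can be
+ * shaped independently.
+ */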
+int
+roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ uint32_t nonleaf_id = nix->nb_tx_queues;
+ struct nix_tm_node *node = NULL;
+ uint8_t leaf_lvl, lvl, lvl_end;
+ uint32_t parent, i;
+ int rc = 0;
+
+ /* Add ROOT, SCH1, SCH2 nodes */
+ parent = ROC_NIX_TM_NODE_ID_INVALID;
+ lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH3 :
+ ROC_TM_LVL_SCH2);
+
+ for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = lvl;
+ node->tree = ROC_NIX_TM_RLIMIT;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+ parent = nonleaf_id;
+ nonleaf_id++;
+ }
+
+ /* SMQ is mapped to SCH4 when we have TL1 access and SCH3 otherwise */
+ lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 : ROC_TM_LVL_SCH3);
+
+ /* Add per-queue SMQ nodes, i.e., SCH4/SCH3 */
+ for (i = 0; i < nix->nb_tx_queues; i++) {
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id + i;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = lvl;
+ node->tree = ROC_NIX_TM_RLIMIT;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+ }
+
+ parent = nonleaf_id;
+ leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
+ ROC_TM_LVL_SCH4);
+
+ /* Add leaf nodes */
+ for (i = 0; i < nix->nb_tx_queues; i++) {
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = i;
+ node->parent_id = parent + i;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = leaf_lvl;
+ node->tree = ROC_NIX_TM_RLIMIT;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+ }
+
+ return 0;
+error:
+ nix_tm_node_free(node);
+ return rc;
+}
+
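+/* PFC tree: a single TL1/TL2 pair feeding an independent
+ * SCH2 -> SCH3 -> SCH4 -> leaf chain per SQ, so each traffic class
+ * can be backpressured on its own.
+ */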
+int
+roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ uint32_t nonleaf_id = nix->nb_tx_queues;
+ struct nix_tm_node *node = NULL;
+ uint8_t leaf_lvl, lvl, lvl_end;
+ uint32_t tl2_node_id;
+ uint32_t parent, i;
+ int rc = -ENOMEM;
+
+ parent = ROC_NIX_TM_NODE_ID_INVALID;
+ lvl_end = ROC_TM_LVL_SCH3;
+ leaf_lvl = ROC_TM_LVL_QUEUE;
+
+ /* TL1 node */
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = ROC_TM_LVL_ROOT;
+ node->tree = ROC_NIX_TM_PFC;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ parent = nonleaf_id;
+ nonleaf_id++;
+
+ /* TL2 node */
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = ROC_TM_LVL_SCH1;
+ node->tree = ROC_NIX_TM_PFC;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ tl2_node_id = nonleaf_id;
+ nonleaf_id++;
+
+ for (i = 0; i < nix->nb_tx_queues; i++) {
+ parent = tl2_node_id;
+ for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id =
+ ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = lvl;
+ node->tree = ROC_NIX_TM_PFC;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ parent = nonleaf_id;
+ nonleaf_id++;
+ }
+
+ lvl = ROC_TM_LVL_SCH4;
+
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = nonleaf_id;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = lvl;
+ node->tree = ROC_NIX_TM_PFC;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+
+ parent = nonleaf_id;
+ nonleaf_id++;
+
+ rc = -ENOMEM;
+ node = nix_tm_node_alloc();
+ if (!node)
+ goto error;
+
+ node->id = i;
+ node->parent_id = parent;
+ node->priority = 0;
+ node->weight = NIX_TM_DFLT_RR_WT;
+ node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
+ node->lvl = leaf_lvl;
+ node->tree = ROC_NIX_TM_PFC;
+ node->rel_chan = NIX_TM_CHAN_INVALID;
+
+ rc = nix_tm_node_add(roc_nix, node);
+ if (rc)
+ goto error;
+ }
+
+ return 0;
+error:
+ nix_tm_node_free(node);
+ return rc;
+}
+
int
nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
{
bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
}
+ rc = nix_tm_mark_init(nix);
+ if (rc)
+ goto exit;
+
/* Disable TL1 Static Priority when VF's are enabled
* as otherwise VF's TL2 reallocation will be needed
* runtime to support a specific topology of PF.