1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* Index of the lowest set bit of 'slab' via __builtin_ctzll.
 * NOTE(review): fragment — the return-type line and any slab == 0 guard
 * are not visible; __builtin_ctzll(0) is undefined, so confirm the
 * missing lines (or all callers) rule out a zero slab.
 */
9 bitmap_ctzll(uint64_t slab)
14 return __builtin_ctzll(slab);
/* Remove and free every shaper profile on nix->shaper_profile_list.
 * Uses the *_SAFE iteration macro so entries can be unlinked while
 * walking; a non-zero ref_cnt only triggers a warning, the profile is
 * still removed (best-effort teardown).
 */
18 nix_tm_clear_shaper_profiles(struct nix *nix)
20 struct nix_tm_shaper_profile *shaper_profile, *tmp;
21 struct nix_tm_shaper_profile_list *list;
23 list = &nix->shaper_profile_list;
24 PLT_TAILQ_FOREACH_SAFE(shaper_profile, list, shaper, tmp) {
25 if (shaper_profile->ref_cnt)
26 plt_warn("Shaper profile %u has non zero references",
28 TAILQ_REMOVE(&nix->shaper_profile_list, shaper_profile, shaper);
29 nix_tm_shaper_profile_free(shaper_profile);
/* Program topology, scheduling and shaping registers for 'node' through
 * a TXSCHQ config mailbox message.  When this function lacks TL1 access
 * and the node sits at TL2, a default TL1 configuration is sent first so
 * the TL2 node has a valid parent.  Register/value/mask triplets are
 * accumulated locally (k entries) and copied into the request.
 * NOTE(review): listing fragment — error-check lines and braces between
 * the visible statements are not shown here.
 */
34 nix_tm_node_reg_conf(struct nix *nix, struct nix_tm_node *node)
36 uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
37 uint64_t regval[MAX_REGS_PER_MBOX_MSG];
38 struct nix_tm_shaper_profile *profile;
39 uint64_t reg[MAX_REGS_PER_MBOX_MSG];
40 struct mbox *mbox = (&nix->dev)->mbox;
41 struct nix_txschq_config *req;
46 memset(regval, 0, sizeof(regval));
47 memset(regval_mask, 0, sizeof(regval_mask));
49 profile = nix_tm_shaper_profile_search(nix, node->shaper_profile_id);
50 hw_lvl = node->hw_lvl;
52 /* Need this trigger to configure TL1 */
53 if (!nix_tm_have_tl1_access(nix) && hw_lvl == NIX_TXSCH_LVL_TL2) {
54 /* Prepare default conf for TL1 */
55 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
56 req->lvl = NIX_TXSCH_LVL_TL1;
58 k = nix_tm_tl1_default_prep(node->parent_hw_id, req->reg,
61 rc = mbox_process(mbox);
66 /* Prepare topology config */
67 k = nix_tm_topology_reg_prep(nix, node, reg, regval, regval_mask);
69 /* Prepare schedule config */
/* FIX: "&reg[k]"/"&regval[k]" had been corrupted into the mojibake
 * "®[k]"/"®val[k]" — an encoding round-trip turned the text "&reg"
 * into the HTML entity (R) symbol.  Restore the address-of expressions
 * so the prep helpers append their entries at offset k.
 */
70 k += nix_tm_sched_reg_prep(nix, node, &reg[k], &regval[k]);
72 /* Prepare shaping config */
73 k += nix_tm_shaper_reg_prep(node, profile, &reg[k], &regval[k]);
78 /* Copy and send config mbox */
79 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
83 mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
84 mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
85 mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);
87 rc = mbox_process(mbox);
93 plt_err("Txschq conf failed for node %p, rc=%d", node, rc);
/* Walk the tree's node list level by level (0 .. tm_root_lvl) and push
 * each node's register configuration via nix_tm_node_reg_conf().  The
 * PF-or-LBK check gates a per-channel backpressure enable at the link
 * config level.  NOTE(review): fragment — the assignments to
 * is_pf_or_lbk/skip_bp and the body of the BP branch are not visible.
 */
98 nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree)
100 struct nix_tm_node_list *list;
101 bool is_pf_or_lbk = false;
102 struct nix_tm_node *node;
103 bool skip_bp = false;
107 list = nix_tm_node_list(nix, tree);
109 if ((!dev_is_vf(&nix->dev) || nix->lbk_link) && !nix->sdp_link)
112 for (hw_lvl = 0; hw_lvl <= nix->tm_root_lvl; hw_lvl++) {
113 TAILQ_FOREACH(node, list, node) {
114 if (node->hw_lvl != hw_lvl)
117 /* Only one TL3/TL2 Link config should have BP enable
118 * set per channel only for PF or lbk vf.
121 if (is_pf_or_lbk && !skip_bp &&
122 node->hw_lvl == nix->tm_link_cfg_lvl) {
127 rc = nix_tm_node_reg_conf(nix, node);
/* Two passes over the tree list: first free HW resources of children
 * whose direct parent is marked child_realloc (dirty), resetting their
 * max_prio; then recompute each parent's round-robin info (rr_prio,
 * rr_num, max_prio) from nix_tm_check_rr().  Assumes at most one RR
 * group per parent (see inline comment).
 */
137 nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree)
139 struct nix_tm_node *child, *parent;
140 struct nix_tm_node_list *list;
141 uint32_t rr_prio, max_prio;
144 list = nix_tm_node_list(nix, tree);
146 /* Release all the node hw resources locally
147 * if parent marked as dirty and resource exists.
149 TAILQ_FOREACH(child, list, node) {
150 /* Release resource only if parent direct hierarchy changed */
151 if (child->flags & NIX_TM_NODE_HWRES && child->parent &&
152 child->parent->child_realloc) {
153 nix_tm_free_node_resource(nix, child);
155 child->max_prio = UINT32_MAX;
158 TAILQ_FOREACH(parent, list, node) {
159 /* Count group of children of same priority i.e are RR */
160 rr_num = nix_tm_check_rr(nix, parent->id, tree, &rr_prio,
163 /* Assuming that multiple RR groups are
164 * not configured based on capability.
166 parent->rr_prio = rr_prio;
167 parent->rr_num = rr_num;
168 parent->max_prio = max_prio;
/* Return the node whose hw level equals nix->tm_root_lvl in the given
 * tree, presumably NULL when absent (return statements not visible in
 * this fragment — confirm).
 */
175 nix_tm_root_node_get(struct nix *nix, int tree)
177 struct nix_tm_node_list *list = nix_tm_node_list(nix, tree);
178 struct nix_tm_node *tm_node;
180 TAILQ_FOREACH(tm_node, list, node) {
181 if (tm_node->hw_lvl == nix->tm_root_lvl)
/* Validate and insert a caller-allocated TM node into nix->trees[tree].
 * Checks performed (each returning a NIX_ERR_* code on failure):
 *  - tree index in range;
 *  - sw level maps to a valid hw level;
 *  - leaf nodes must have priority 0;
 *  - non-root nodes need a parent at exactly the next hw level;
 *  - duplicate node id / duplicate root rejected;
 *  - non-leaf: shaper profile must exist (unless NONE) and its pkt_mode
 *    must match the node's;
 *  - sibling priority/DWRR layout validated; weight bounded.
 * On success links the node to its parent, marks the parent dirty for
 * child reallocation and appends to the tree's tail queue.
 */
189 nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
191 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
192 struct nix_tm_shaper_profile *profile;
193 uint32_t node_id, parent_id, lvl;
194 struct nix_tm_node *parent_node;
195 uint32_t priority, profile_id;
196 uint8_t hw_lvl, exp_next_lvl;
197 enum roc_nix_tm_tree tree;
201 priority = node->priority;
202 parent_id = node->parent_id;
203 profile_id = node->shaper_profile_id;
207 plt_tm_dbg("Add node %s lvl %u id %u, prio 0x%x weight 0x%x "
208 "parent %u profile 0x%x tree %u",
209 nix_tm_hwlvl2str(nix_tm_lvl2nix(nix, lvl)), lvl, node_id,
210 priority, node->weight, parent_id, profile_id, tree);
212 if (tree >= ROC_NIX_TM_TREE_MAX)
213 return NIX_ERR_PARAM;
215 /* Translate sw level id's to nix hw level id's */
216 hw_lvl = nix_tm_lvl2nix(nix, lvl);
217 if (hw_lvl == NIX_TXSCH_LVL_CNT && !nix_tm_is_leaf(nix, lvl))
218 return NIX_ERR_TM_INVALID_LVL;
220 /* Leaf nodes have to be same priority */
221 if (nix_tm_is_leaf(nix, lvl) && priority != 0)
222 return NIX_ERR_TM_INVALID_PRIO;
224 parent_node = nix_tm_node_search(nix, parent_id, tree);
/* Leaf SQ ids (< nb_tx_queues) attach under an SMQ; otherwise the
 * parent must be one hw level above this node.
 */
226 if (node_id < nix->nb_tx_queues)
227 exp_next_lvl = NIX_TXSCH_LVL_SMQ;
229 exp_next_lvl = hw_lvl + 1;
231 /* Check if there is no parent node yet */
232 if (hw_lvl != nix->tm_root_lvl &&
233 (!parent_node || parent_node->hw_lvl != exp_next_lvl))
234 return NIX_ERR_TM_INVALID_PARENT;
236 /* Check if a node already exists */
237 if (nix_tm_node_search(nix, node_id, tree))
238 return NIX_ERR_TM_NODE_EXISTS;
240 /* Check if root node exists */
241 if (hw_lvl == nix->tm_root_lvl && nix_tm_root_node_get(nix, tree))
242 return NIX_ERR_TM_NODE_EXISTS;
244 profile = nix_tm_shaper_profile_search(nix, profile_id);
245 if (!nix_tm_is_leaf(nix, lvl)) {
246 /* Check if shaper profile exists for non leaf node */
247 if (!profile && profile_id != ROC_NIX_TM_SHAPER_PROFILE_NONE)
248 return NIX_ERR_TM_INVALID_SHAPER_PROFILE;
250 /* Packet mode in profile should match with that of tm node */
251 if (profile && profile->pkt_mode != node->pkt_mode)
252 return NIX_ERR_TM_PKT_MODE_MISMATCH;
255 /* Check if there is second DWRR already in siblings or holes in prio */
256 rc = nix_tm_validate_prio(nix, lvl, parent_id, priority, tree);
260 if (node->weight > roc_nix_tm_max_sched_wt_get())
261 return NIX_ERR_TM_WEIGHT_EXCEED;
263 /* Maintain minimum weight */
267 node->hw_lvl = nix_tm_lvl2nix(nix, lvl);
269 node->max_prio = UINT32_MAX;
270 node->hw_id = NIX_TM_HW_ID_INVALID;
276 node->parent = parent_node;
/* New child forces HW reallocation of the parent's children. */
278 parent_node->child_realloc = true;
279 node->parent_hw_id = NIX_TM_HW_ID_INVALID;
281 TAILQ_INSERT_TAIL(&nix->trees[tree], node, node);
282 plt_tm_dbg("Added node %s lvl %u id %u (%p)",
283 nix_tm_hwlvl2str(node->hw_lvl), lvl, node_id, node);
/* Re-enable (clear SW XOFF on) every disabled-but-HW-backed node on the
 * path from 'node' toward the root so a subsequent flush can drain.
 * NOTE(review): the loop header walking 'p' up the parent chain is not
 * visible in this fragment.
 */
288 nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
290 struct mbox *mbox = (&nix->dev)->mbox;
291 struct nix_txschq_config *req;
292 struct nix_tm_node *p;
295 /* Enable nodes in path for flush to succeed */
296 if (!nix_tm_is_leaf(nix, node->lvl))
301 if (!(p->flags & NIX_TM_NODE_ENABLED) &&
302 (p->flags & NIX_TM_NODE_HWRES)) {
303 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
304 req->lvl = p->hw_lvl;
305 req->num_regs = nix_tm_sw_xoff_prep(p, false, req->reg,
307 rc = mbox_process(mbox);
311 p->flags |= NIX_TM_NODE_ENABLED;
/* Enable/disable link backpressure for the TL3/TL2 node feeding SQ 'sq'
 * on traffic class 'tc'.  Finds the SQ's ancestor at the link-config
 * level and writes its LINKX_CFG register: low byte carries the relative
 * channel (tc), BIT(13) is the BP-enable bit (per regval usage below —
 * confirm against HRM).  Requests are batched up to
 * MAX_REGS_PER_MBOX_MSG registers per mailbox message.
 */
320 nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
323 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
324 enum roc_nix_tm_tree tree = nix->tm_tree;
325 struct mbox *mbox = (&nix->dev)->mbox;
326 struct nix_txschq_config *req = NULL;
327 struct nix_tm_node_list *list;
328 uint16_t link = nix->tx_link;
329 struct nix_tm_node *sq_node;
330 struct nix_tm_node *parent;
331 struct nix_tm_node *node;
335 sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
339 parent = sq_node->parent;
/* Climb to the SCH2 ancestor (the level tracked for rel_chan). */
341 if (parent->lvl == ROC_TM_LVL_SCH2)
344 parent = parent->parent;
349 list = nix_tm_node_list(nix, tree);
/* A node already bound to a different tc cannot take this one. */
351 if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
356 TAILQ_FOREACH(node, list, node) {
357 if (node->hw_lvl != nix->tm_link_cfg_lvl)
360 if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
363 if (node->hw_id != parent->hw_id)
367 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
368 req->lvl = nix->tm_link_cfg_lvl;
372 req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
373 req->regval[k] = enable ? tc : 0;
374 req->regval[k] |= enable ? BIT_ULL(13) : 0;
375 req->regval_mask[k] = ~(BIT_ULL(13) | GENMASK_ULL(7, 0));
378 if (k >= MAX_REGS_PER_MBOX_MSG) {
380 rc = mbox_process(mbox);
389 rc = mbox_process(mbox);
/* Record the channel binding so later calls can detect conflicts. */
394 parent->rel_chan = enable ? tc : NIX_TM_CHAN_INVALID;
397 plt_err("Failed to %s bp on link %u, rc=%d(%s)",
398 enable ? "enable" : "disable", link, rc, roc_error_msg_get(rc));
/* Report whether backpressure is enabled on the TX link: reads
 * LINKX_CFG for every BP-capable node at the link-config level and ANDs
 * BIT(13) across all responses — *is_enabled is true only when every
 * such node has BP set (false when none found).  Reads are batched per
 * MAX_REGS_PER_MBOX_MSG; the response register count is sanity-checked.
 */
403 nix_tm_bp_config_get(struct roc_nix *roc_nix, bool *is_enabled)
405 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
406 struct nix_txschq_config *req = NULL, *rsp;
407 enum roc_nix_tm_tree tree = nix->tm_tree;
408 struct mbox *mbox = (&nix->dev)->mbox;
409 struct nix_tm_node_list *list;
410 struct nix_tm_node *node;
417 list = nix_tm_node_list(nix, tree);
420 TAILQ_FOREACH(node, list, node) {
421 if (node->hw_lvl != nix->tm_link_cfg_lvl)
424 if (!(node->flags & NIX_TM_NODE_HWRES) || !node->bp_capa)
429 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
431 req->lvl = nix->tm_link_cfg_lvl;
435 req->reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(node->hw_id, link);
438 if (k >= MAX_REGS_PER_MBOX_MSG) {
440 rc = mbox_process_msg(mbox, (void **)&rsp);
441 if (rc || rsp->num_regs != k)
445 /* Report it as enabled only if enabled or all */
446 for (i = 0; i < k; i++)
447 enable &= !!(rsp->regval[i] & BIT_ULL(13));
/* Flush the final partial batch, if any. */
453 rc = mbox_process_msg(mbox, (void **)&rsp);
456 /* Report it as enabled only if enabled or all */
457 for (i = 0; i < k; i++)
458 enable &= !!(rsp->regval[i] & BIT_ULL(13));
461 *is_enabled = found ? !!enable : false;
464 plt_err("Failed to get bp status on link %u, rc=%d(%s)", link, rc,
465 roc_error_msg_get(rc));
/* Set or clear SMQ XOFF/FLUSH for the SMQ behind 'node'.  First clears
 * SW XOFF along the path (needed for the flush to drain), then writes
 * NIX_AF_SMQX_CFG: bits 49|50 set for enable, cleared for disable; the
 * mask keeps all other SMQ config bits intact.  Returns the mbox result.
 */
470 nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable)
472 struct mbox *mbox = (&nix->dev)->mbox;
473 struct nix_txschq_config *req;
478 plt_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
479 enable ? "enable" : "disable");
481 rc = nix_tm_clear_path_xoff(nix, node);
485 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
486 req->lvl = NIX_TXSCH_LVL_SMQ;
489 req->reg[0] = NIX_AF_SMQX_CFG(smq);
490 req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
491 req->regval_mask[0] =
492 enable ? ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);
494 return mbox_process(mbox);
/* For leaf SQ 'sq': output its parent SMQ's hw id and the RR quantum
 * derived from the node weight, clear the SMQ's XOFF and mark the node
 * enabled.  Fails when the node is missing, not a leaf, or its parent
 * has no valid hw id yet.
 */
498 nix_tm_leaf_data_get(struct nix *nix, uint16_t sq, uint32_t *rr_quantum,
501 struct nix_tm_node *node;
504 node = nix_tm_node_search(nix, sq, nix->tm_tree);
506 /* Check if we found a valid leaf node */
507 if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
508 node->parent->hw_id == NIX_TM_HW_ID_INVALID) {
512 /* Get SMQ Id of leaf node's parent */
513 *smq = node->parent->hw_id;
514 *rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
516 rc = nix_tm_smq_xoff(nix, node->parent, false);
519 node->flags |= NIX_TM_NODE_ENABLED;
/* Busy-poll NIX_LF_SQ_OP_STATUS until SQ 'sq' is quiescent: <= 1 SQB in
 * use, head == tail, and the fc memory shows all SQB buffers returned.
 * The timeout is derived from worst-case drain time at the minimum
 * shaper rate multiplied by the number of TX queues (this SQ could be
 * lowest priority).  On timeout, TM and queue contexts are dumped for
 * debug.  NOTE(review): the polling loop header and the timeout
 * decrement/compare lines are not visible in this fragment.
 */
524 roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
526 struct nix *nix = roc_nix_to_nix_priv(sq->roc_nix);
527 uint16_t sqb_cnt, head_off, tail_off;
528 uint64_t wdata, val, prev;
529 uint16_t qid = sq->qid;
531 uint64_t timeout; /* 10's of usec */
533 /* Wait for enough time based on shaper min rate */
534 timeout = (sq->nb_desc * roc_nix_max_pkt_len(sq->roc_nix) * 8 * 1E5);
535 /* Wait for worst case scenario of this SQ being last priority
536 * and so have to wait for all other SQ's drain out by their own.
538 timeout = timeout * nix->nb_tx_queues;
539 timeout = timeout / nix->tm_rate_min;
/* Atomic add of (qid << 32) to the op register returns SQ status. */
543 wdata = ((uint64_t)qid << 32);
544 regaddr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
545 val = roc_atomic64_add_nosync(wdata, regaddr);
547 /* Spin multiple iterations as "sq->fc_cache_pkts" can still
548 * have space to send pkts even though fc_mem is disabled
554 val = roc_atomic64_add_nosync(wdata, regaddr);
555 /* Continue on error */
556 if (val & BIT_ULL(63))
/* Unpack status: SQB count and ring head/tail offsets. */
562 sqb_cnt = val & 0xFFFF;
563 head_off = (val >> 20) & 0x3F;
564 tail_off = (val >> 28) & 0x3F;
566 /* SQ reached quiescent state */
567 if (sqb_cnt <= 1 && head_off == tail_off &&
568 (*(volatile uint64_t *)sq->fc == sq->nb_sqb_bufs)) {
580 roc_nix_tm_dump(sq->roc_nix);
581 roc_nix_queues_ctx_dump(sq->roc_nix);
585 /* Flush and disable tx queue and its parent SMQ */
/* Pre-flush sequence for stopping SQ 'sq':
 *  1. no-op if the TM hierarchy is disabled;
 *  2. temporarily enable CGX RX/TX when IO is stopped so pkts drain;
 *  3. disable link backpressure and clear SMQ XOFF;
 *  4. per HRM, pause all sibling SQs feeding the same SMQ before flush
 *     (sibling handling lines not visible in this fragment);
 *  5. disable the SQB aura flow control and spin until drained;
 *  6. mark the node disabled, set SMQ XOFF+FLUSH, restore CGX state.
 */
587 nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
589 struct roc_nix *roc_nix = sq->roc_nix;
590 struct nix_tm_node *node, *sibling;
591 struct nix_tm_node_list *list;
592 enum roc_nix_tm_tree tree;
598 nix = roc_nix_to_nix_priv(roc_nix);
600 /* Need not do anything if tree is in disabled state */
601 if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
604 mbox = (&nix->dev)->mbox;
608 list = nix_tm_node_list(nix, tree);
610 /* Find the node for this SQ */
611 node = nix_tm_node_search(nix, qid, tree);
612 if (!node || !(node->flags & NIX_TM_NODE_ENABLED)) {
613 plt_err("Invalid node/state for sq %u", qid);
617 /* Enable CGX RXTX to drain pkts */
618 if (!roc_nix->io_enabled) {
619 /* Though it enables both RX MCAM Entries and CGX Link
620 * we assume all the rx queues are stopped way back.
622 mbox_alloc_msg_nix_lf_start_rx(mbox);
623 rc = mbox_process(mbox);
625 plt_err("cgx start failed, rc=%d", rc);
630 /* Disable backpressure */
631 rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
633 plt_err("Failed to disable backpressure for flush, rc=%d", rc);
637 /* Disable smq xoff for case it was enabled earlier */
638 rc = nix_tm_smq_xoff(nix, node->parent, false);
640 plt_err("Failed to enable smq %u, rc=%d", node->parent->hw_id,
645 /* As per HRM, to disable an SQ, all other SQ's
646 * that feed to same SMQ must be paused before SMQ flush.
648 TAILQ_FOREACH(sibling, list, node) {
649 if (sibling->parent != node->parent)
651 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
659 rc = roc_nix_tm_sq_aura_fc(sq, false);
661 plt_err("Failed to disable sqb aura fc, rc=%d", rc);
665 /* Wait for sq entries to be flushed */
666 rc = roc_nix_tm_sq_flush_spin(sq);
668 plt_err("Failed to drain sq %u, rc=%d\n", sq->qid, rc);
673 node->flags &= ~NIX_TM_NODE_ENABLED;
675 /* Disable and flush */
676 rc = nix_tm_smq_xoff(nix, node->parent, true);
678 plt_err("Failed to disable smq %u, rc=%d", node->parent->hw_id,
683 /* Restore cgx state */
684 if (!roc_nix->io_enabled) {
685 mbox_alloc_msg_nix_lf_stop_rx(mbox);
686 rc |= mbox_process(mbox);
/* Post-flush counterpart of nix_tm_sq_flush_pre(): after SQ 'sq' was
 * stopped, re-enable every still-present enabled sibling SQ under the
 * same SMQ (clear SMQ XOFF once, re-enable each sibling's SQB aura fc)
 * and restore link backpressure.  No-op when the hierarchy is disabled.
 */
693 nix_tm_sq_flush_post(struct roc_nix_sq *sq)
695 struct roc_nix *roc_nix = sq->roc_nix;
696 struct nix_tm_node *node, *sibling;
697 struct nix_tm_node_list *list;
698 enum roc_nix_tm_tree tree;
699 struct roc_nix_sq *s_sq;
705 nix = roc_nix_to_nix_priv(roc_nix);
707 /* Need not do anything if tree is in disabled state */
708 if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
713 list = nix_tm_node_list(nix, tree);
715 /* Find the node for this SQ */
716 node = nix_tm_node_search(nix, qid, tree);
718 plt_err("Invalid node for sq %u", qid);
722 /* Enable all the siblings back */
723 TAILQ_FOREACH(sibling, list, node) {
724 if (sibling->parent != node->parent)
727 if (sibling->id == qid)
730 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
734 s_sq = nix->sqs[s_qid];
739 /* Enable back if any SQ is still present */
740 rc = nix_tm_smq_xoff(nix, node->parent, false);
742 plt_err("Failed to enable smq %u, rc=%d",
743 node->parent->hw_id, rc);
749 rc = roc_nix_tm_sq_aura_fc(s_sq, true);
751 plt_err("Failed to enable sqb aura fc, rc=%d", rc);
759 /* Restore backpressure */
760 rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, true);
762 plt_err("Failed to restore backpressure, rc=%d", rc);
/* Bind leaf node's SQ to its parent SMQ and program the RR quantum via
 * an NIX AQ WRITE.  When rr_quantum_only is set only the quantum field
 * is updated.  CN9K uses smq_rr_quantum; CN10K uses smq_rr_weight —
 * same intent, different context layout.
 */
770 nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
771 bool rr_quantum_only)
773 struct mbox *mbox = (&nix->dev)->mbox;
774 uint16_t qid = node->id, smq;
778 smq = node->parent->hw_id;
779 rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
782 plt_tm_dbg("Update sq(%u) rr_quantum 0x%" PRIx64, qid,
785 plt_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%" PRIx64,
786 qid, smq, rr_quantum);
/* NOTE(review): '>' (not '>=') — boundary qid == nb_tx_queues passes;
 * confirm against the full source whether that is intended.
 */
788 if (qid > nix->nb_tx_queues)
791 if (roc_model_is_cn9k()) {
792 struct nix_aq_enq_req *aq;
794 aq = mbox_alloc_msg_nix_aq_enq(mbox);
799 aq->ctype = NIX_AQ_CTYPE_SQ;
800 aq->op = NIX_AQ_INSTOP_WRITE;
802 /* smq update only when needed */
803 if (!rr_quantum_only) {
805 aq->sq_mask.smq = ~aq->sq_mask.smq;
807 aq->sq.smq_rr_quantum = rr_quantum;
808 aq->sq_mask.smq_rr_quantum = ~aq->sq_mask.smq_rr_quantum;
810 struct nix_cn10k_aq_enq_req *aq;
812 aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
817 aq->ctype = NIX_AQ_CTYPE_SQ;
818 aq->op = NIX_AQ_INSTOP_WRITE;
820 /* smq update only when needed */
821 if (!rr_quantum_only) {
823 aq->sq_mask.smq = ~aq->sq_mask.smq;
825 aq->sq.smq_rr_weight = rr_quantum;
826 aq->sq_mask.smq_rr_weight = ~aq->sq_mask.smq_rr_weight;
829 rc = mbox_process(mbox);
831 plt_err("Failed to set smq, rc=%d", rc);
/* Return spare TXSCH queues at 'hw_lvl' to the AF.  Scans the matching
 * (contiguous or discontiguous) bitmap; frees only the amount above the
 * reserved threshold (or everything — selection lines not fully visible
 * in this fragment), issuing one free request per schq and clearing its
 * bitmap bit on success.
 */
836 nix_tm_release_resources(struct nix *nix, uint8_t hw_lvl, bool contig,
839 uint16_t avail, thresh, to_free = 0, schq;
840 struct mbox *mbox = (&nix->dev)->mbox;
841 struct nix_txsch_free_req *req;
842 struct plt_bitmap *bmp;
847 bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
849 contig ? nix->contig_rsvd[hw_lvl] : nix->discontig_rsvd[hw_lvl];
850 plt_bitmap_scan_init(bmp);
852 avail = nix_tm_resource_avail(nix, hw_lvl, contig);
855 /* Release only above threshold */
857 to_free = avail - thresh;
859 /* Release everything */
863 /* Now release resources to AF */
865 if (!slab && !plt_bitmap_scan(bmp, &pos, &slab))
868 schq = bitmap_ctzll(slab);
869 slab &= ~(1ULL << schq);
873 req = mbox_alloc_msg_nix_txsch_free(mbox);
877 req->schq_lvl = hw_lvl;
879 rc = mbox_process(mbox);
881 plt_err("failed to release hwres %s(%u) rc %d",
882 nix_tm_hwlvl2str(hw_lvl), schq, rc);
886 plt_tm_dbg("Released hwres %s(%u)", nix_tm_hwlvl2str(hw_lvl),
888 plt_bitmap_clear(bmp, schq);
893 plt_err("resource inconsistency for %s(%u)",
894 nix_tm_hwlvl2str(hw_lvl), contig);
/* Release the HW schq held by 'node'.  When the discontiguous pool at
 * this level is below its reserved count, the schq is recycled into the
 * local bitmap instead of being returned to the AF; otherwise a
 * txsch_free mailbox request frees it.  Either way the node loses its
 * hw id and HWRES flag, and its parent is marked dirty so children get
 * reallocated.
 */
901 nix_tm_free_node_resource(struct nix *nix, struct nix_tm_node *node)
903 struct mbox *mbox = (&nix->dev)->mbox;
904 struct nix_txsch_free_req *req;
905 struct plt_bitmap *bmp;
906 uint16_t avail, hw_id;
910 hw_lvl = node->hw_lvl;
912 bmp = nix->schq_bmp[hw_lvl];
913 /* Free specific HW resource */
914 plt_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
915 nix_tm_hwlvl2str(node->hw_lvl), hw_id, node->lvl, node->id,
918 avail = nix_tm_resource_avail(nix, hw_lvl, false);
919 /* Always for now free to discontiguous queue when avail
922 if (nix->discontig_rsvd[hw_lvl] &&
923 avail < nix->discontig_rsvd[hw_lvl]) {
924 PLT_ASSERT(hw_id < NIX_TM_MAX_HW_TXSCHQ);
925 PLT_ASSERT(plt_bitmap_get(bmp, hw_id) == 0);
926 plt_bitmap_set(bmp, hw_id);
927 node->hw_id = NIX_TM_HW_ID_INVALID;
928 node->flags &= ~NIX_TM_NODE_HWRES;
933 req = mbox_alloc_msg_nix_txsch_free(mbox);
937 req->schq_lvl = node->hw_lvl;
939 rc = mbox_process(mbox);
941 plt_err("failed to release hwres %s(%u) rc %d",
942 nix_tm_hwlvl2str(node->hw_lvl), hw_id, rc);
946 /* Mark parent as dirty for reallocing it's children */
948 node->parent->child_realloc = true;
950 node->hw_id = NIX_TM_HW_ID_INVALID;
951 node->flags &= ~NIX_TM_NODE_HWRES;
952 plt_tm_dbg("Released hwres %s(%u) to af",
953 nix_tm_hwlvl2str(node->hw_lvl), hw_id);
/* Remove node 'node_id' from 'tree'.  Rejects deletion while children
 * exist; drops the shaper profile reference, frees any held HW
 * resource, unlinks the node from the list, and frees the node object
 * only when 'free' is true (callers may own the memory).
 */
958 nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
959 enum roc_nix_tm_tree tree, bool free)
961 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
962 struct nix_tm_shaper_profile *profile;
963 struct nix_tm_node *node, *child;
964 struct nix_tm_node_list *list;
968 plt_tm_dbg("Delete node id %u tree %u", node_id, tree);
970 node = nix_tm_node_search(nix, node_id, tree);
972 return NIX_ERR_TM_INVALID_NODE;
974 list = nix_tm_node_list(nix, tree);
975 /* Check for any existing children */
976 TAILQ_FOREACH(child, list, node) {
977 if (child->parent == node)
978 return NIX_ERR_TM_CHILD_EXISTS;
981 /* Remove shaper profile reference */
982 profile_id = node->shaper_profile_id;
983 profile = nix_tm_shaper_profile_search(nix, profile_id);
985 /* Free hw resource locally */
986 if (node->flags & NIX_TM_NODE_HWRES) {
987 rc = nix_tm_free_node_resource(nix, node);
995 TAILQ_REMOVE(list, node, node);
997 plt_tm_dbg("Deleted node %s lvl %u id %u, prio 0x%x weight 0x%x "
998 "parent %u profile 0x%x tree %u (%p)",
999 nix_tm_hwlvl2str(node->hw_lvl), node->lvl, node->id,
1000 node->priority, node->weight,
1001 node->parent ? node->parent->id : UINT32_MAX,
1002 node->shaper_profile_id, tree, node);
1003 /* Free only if requested */
1005 nix_tm_node_free(node);
/* Assign HW schq ids to all children of 'parent'.  Children at the RR
 * priority come from the discontiguous bitmap (or a saved spare
 * contiguous slot when RR+SP is mixed); SP children take contiguous ids
 * at contig_id + priority, decrementing *contig_cnt.  Each child gets
 * hw_id, parent_hw_id and the HWRES flag.
 */
1010 nix_tm_assign_hw_id(struct nix *nix, struct nix_tm_node *parent,
1011 uint16_t *contig_id, int *contig_cnt,
1012 struct nix_tm_node_list *list)
1014 struct nix_tm_node *child;
1015 struct plt_bitmap *bmp;
1016 uint8_t child_hw_lvl;
1017 int spare_schq = -1;
1022 child_hw_lvl = parent->hw_lvl - 1;
1023 bmp = nix->schq_bmp[child_hw_lvl];
1024 plt_bitmap_scan_init(bmp);
1027 /* Save spare schq if it is case of RR + SP */
1028 if (parent->rr_prio != 0xf && *contig_cnt > 1)
1029 spare_schq = *contig_id + parent->rr_prio;
1031 TAILQ_FOREACH(child, list, node) {
1034 if (child->parent->id != parent->id)
1037 /* Resource never expected to be present */
1038 if (child->flags & NIX_TM_NODE_HWRES) {
1039 plt_err("Resource exists for child (%s)%u, id %u (%p)",
1040 nix_tm_hwlvl2str(child->hw_lvl), child->hw_id,
1046 plt_bitmap_scan(bmp, &pos, &slab);
1048 if (child->priority == parent->rr_prio && spare_schq != -1) {
1049 /* Use spare schq first if present */
1052 *contig_cnt = *contig_cnt - 1;
1054 } else if (child->priority == parent->rr_prio) {
1055 /* Assign a discontiguous queue */
1057 plt_err("Schq not found for Child %u "
1059 child->id, child->lvl, child);
1063 schq = bitmap_ctzll(slab);
1064 slab &= ~(1ULL << schq);
1066 plt_bitmap_clear(bmp, schq);
1068 /* Assign a contiguous queue */
1069 schq = *contig_id + child->priority;
1070 *contig_cnt = *contig_cnt - 1;
1073 plt_tm_dbg("Resource %s(%u), for lvl %u id %u(%p)",
1074 nix_tm_hwlvl2str(child->hw_lvl), schq, child->lvl,
1077 child->hw_id = schq;
1078 child->parent_hw_id = parent->hw_id;
1079 child->flags |= NIX_TM_NODE_HWRES;
/* Distribute allocated TXSCH resources over the tree.  Walks parents
 * top-down (TL1 downward); for each dirty parent it carves a contiguous
 * run of (max_prio + 1) schqs from the contiguous bitmap, verifies the
 * run is fully available, hands ids to children via
 * nix_tm_assign_hw_id(), then clears the dirty bit.  Finally assigns
 * the root its hw id from the discontiguous bitmap, and — when TL1 is
 * not accessible — also reserves a TL1 id as the root's parent.
 */
1086 nix_tm_assign_resources(struct nix *nix, enum roc_nix_tm_tree tree)
1088 struct nix_tm_node *parent, *root = NULL;
1089 struct plt_bitmap *bmp, *bmp_contig;
1090 struct nix_tm_node_list *list;
1091 uint8_t child_hw_lvl, hw_lvl;
1092 uint16_t contig_id, j;
1097 list = nix_tm_node_list(nix, tree);
1098 /* Walk from TL1 to TL4 parents */
1099 for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
1100 TAILQ_FOREACH(parent, list, node) {
1101 child_hw_lvl = parent->hw_lvl - 1;
1102 if (parent->hw_lvl != hw_lvl)
1105 /* Remember root for future */
1106 if (parent->hw_lvl == nix->tm_root_lvl)
1109 if (!parent->child_realloc) {
1110 /* Skip when parent is not dirty */
1111 if (nix_tm_child_res_valid(list, parent))
1113 plt_err("Parent not dirty but invalid "
1114 "child res parent id %u(lvl %u)",
1115 parent->id, parent->lvl);
1119 bmp_contig = nix->schq_contig_bmp[child_hw_lvl];
1121 /* Prealloc contiguous indices for a parent */
1122 contig_id = NIX_TM_MAX_HW_TXSCHQ;
1123 cnt = (int)parent->max_prio + 1;
1125 plt_bitmap_scan_init(bmp_contig);
1126 if (!plt_bitmap_scan(bmp_contig, &pos, &slab)) {
1127 plt_err("Contig schq not found");
1130 contig_id = pos + bitmap_ctzll(slab);
1132 /* Check if we have enough */
1133 for (j = contig_id; j < contig_id + cnt; j++) {
1134 if (!plt_bitmap_get(bmp_contig, j))
1138 if (j != contig_id + cnt) {
1139 plt_err("Contig schq not sufficient");
1143 for (j = contig_id; j < contig_id + cnt; j++)
1144 plt_bitmap_clear(bmp_contig, j);
1147 /* Assign hw id to all children */
1148 rc = nix_tm_assign_hw_id(nix, parent, &contig_id, &cnt,
1151 plt_err("Unexpected err, contig res alloc, "
1152 "parent %u, of %s, rc=%d, cnt=%d",
1153 parent->id, nix_tm_hwlvl2str(hw_lvl),
1158 /* Clear the dirty bit as children's
1159 * resources are reallocated.
1161 parent->child_realloc = false;
1165 /* Root is always expected to be there */
1169 if (root->flags & NIX_TM_NODE_HWRES)
1172 /* Process root node */
1173 bmp = nix->schq_bmp[nix->tm_root_lvl];
1174 plt_bitmap_scan_init(bmp);
1175 if (!plt_bitmap_scan(bmp, &pos, &slab)) {
1176 plt_err("Resource not allocated for root");
1180 root->hw_id = pos + bitmap_ctzll(slab);
1181 root->flags |= NIX_TM_NODE_HWRES;
1182 plt_bitmap_clear(bmp, root->hw_id);
1184 /* Get TL1 id as well when root is not TL1 */
1185 if (!nix_tm_have_tl1_access(nix)) {
1186 bmp = nix->schq_bmp[NIX_TXSCH_LVL_TL1];
1188 plt_bitmap_scan_init(bmp);
1189 if (!plt_bitmap_scan(bmp, &pos, &slab)) {
1190 plt_err("Resource not found for TL1");
1193 root->parent_hw_id = pos + bitmap_ctzll(slab);
1194 plt_bitmap_clear(bmp, root->parent_hw_id);
1197 plt_tm_dbg("Resource %s(%u) for root(id %u) (%p)",
1198 nix_tm_hwlvl2str(root->hw_lvl), root->hw_id, root->id, root);
/* Record a TXSCH alloc response in the per-level bitmaps: every granted
 * schq id is set in the discontiguous bitmap and every contiguous grant
 * in the contiguous bitmap, for each hw level.
 */
1204 nix_tm_copy_rsp_to_nix(struct nix *nix, struct nix_txsch_alloc_rsp *rsp)
1209 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1210 for (i = 0; i < rsp->schq[lvl]; i++)
1211 plt_bitmap_set(nix->schq_bmp[lvl],
1212 rsp->schq_list[lvl][i]);
1214 for (i = 0; i < rsp->schq_contig[lvl]; i++)
1215 plt_bitmap_set(nix->schq_contig_bmp[lvl],
1216 rsp->schq_contig_list[lvl][i]);
/* Allocate TXSCH queues for 'tree' from the AF.  Estimates the per-
 * level contiguous/discontiguous requirement, releases any existing
 * contiguous resources first (continuity with new grants cannot be
 * guaranteed), then issues alloc requests — split so no request exceeds
 * MAX_TXSCHQ_PER_FUNC per level — folding each response into the
 * bitmaps and capturing link_cfg_lvl.  The trailing loop is the error
 * path: release everything allocated so far.
 */
1221 nix_tm_alloc_txschq(struct nix *nix, enum roc_nix_tm_tree tree)
1223 uint16_t schq_contig[NIX_TXSCH_LVL_CNT];
1224 struct mbox *mbox = (&nix->dev)->mbox;
1225 uint16_t schq[NIX_TXSCH_LVL_CNT];
1226 struct nix_txsch_alloc_req *req;
1227 struct nix_txsch_alloc_rsp *rsp;
1232 memset(schq, 0, sizeof(schq));
1233 memset(schq_contig, 0, sizeof(schq_contig));
1235 /* Estimate requirement */
1236 rc = nix_tm_resource_estimate(nix, schq_contig, schq, tree);
1240 /* Release existing contiguous resources when realloc requested
1241 * as there is no way to guarantee continuity of old with new.
1243 for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
1244 if (schq_contig[hw_lvl])
1245 nix_tm_release_resources(nix, hw_lvl, true, false);
1248 /* Alloc as needed */
1251 req = mbox_alloc_msg_nix_txsch_alloc(mbox);
1256 mbox_memcpy(req->schq, schq, sizeof(req->schq));
1257 mbox_memcpy(req->schq_contig, schq_contig,
1258 sizeof(req->schq_contig));
1260 /* Each alloc can be at max of MAX_TXSCHQ_PER_FUNC per level.
1261 * So split alloc to multiple requests.
1263 for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
1264 if (req->schq[i] > MAX_TXSCHQ_PER_FUNC)
1265 req->schq[i] = MAX_TXSCHQ_PER_FUNC;
1266 schq[i] -= req->schq[i];
1268 if (req->schq_contig[i] > MAX_TXSCHQ_PER_FUNC)
1269 req->schq_contig[i] = MAX_TXSCHQ_PER_FUNC;
1270 schq_contig[i] -= req->schq_contig[i];
1272 if (schq[i] || schq_contig[i])
1276 rc = mbox_process_msg(mbox, (void *)&rsp);
1280 nix_tm_copy_rsp_to_nix(nix, rsp);
1283 nix->tm_link_cfg_lvl = rsp->link_cfg_lvl;
1286 for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
1287 if (nix_tm_release_resources(nix, i, true, false))
1288 plt_err("Failed to release contig resources of "
1291 if (nix_tm_release_resources(nix, i, false, false))
1292 plt_err("Failed to release discontig resources of "
/* Build the default TM tree: a single chain of non-leaf nodes from
 * ROOT down to SCH4 (SCH3 without TL1 access), then one leaf node per
 * TX queue all under the last non-leaf.  Non-leaf ids start at
 * nb_tx_queues so they never collide with leaf (SQ) ids.  On node-add
 * failure the just-allocated node is freed before returning.
 */
1300 nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
1302 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1303 uint32_t nonleaf_id = nix->nb_tx_queues;
1304 struct nix_tm_node *node = NULL;
1305 uint8_t leaf_lvl, lvl, lvl_end;
1309 /* Add ROOT, SCH1, SCH2, SCH3, [SCH4] nodes */
1310 parent = ROC_NIX_TM_NODE_ID_INVALID;
1311 /* With TL1 access we have an extra level */
1312 lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 :
1315 for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
1317 node = nix_tm_node_alloc();
1321 node->id = nonleaf_id;
1322 node->parent_id = parent;
1324 node->weight = NIX_TM_DFLT_RR_WT;
1325 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1327 node->tree = ROC_NIX_TM_DEFAULT;
1328 node->rel_chan = NIX_TM_CHAN_INVALID;
1330 rc = nix_tm_node_add(roc_nix, node);
1333 parent = nonleaf_id;
1337 parent = nonleaf_id - 1;
1338 leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
1341 /* Add leaf nodes */
1342 for (i = 0; i < nix->nb_tx_queues; i++) {
1344 node = nix_tm_node_alloc();
1349 node->parent_id = parent;
1351 node->weight = NIX_TM_DFLT_RR_WT;
1352 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1353 node->lvl = leaf_lvl;
1354 node->tree = ROC_NIX_TM_DEFAULT;
1355 node->rel_chan = NIX_TM_CHAN_INVALID;
1357 rc = nix_tm_node_add(roc_nix, node);
1364 nix_tm_node_free(node);
/* Build the rate-limit TM tree: one chain ROOT..SCH3 (SCH2 without TL1
 * access), then a dedicated SMQ-level node per TX queue (so each SQ can
 * be shaped independently), then one leaf per queue attached 1:1 to its
 * SMQ node (parent_id = parent + i).  Error path frees the node that
 * failed to add.
 */
1369 roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
1371 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1372 uint32_t nonleaf_id = nix->nb_tx_queues;
1373 struct nix_tm_node *node = NULL;
1374 uint8_t leaf_lvl, lvl, lvl_end;
1378 /* Add ROOT, SCH1, SCH2 nodes */
1379 parent = ROC_NIX_TM_NODE_ID_INVALID;
1380 lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH3 :
1383 for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
1385 node = nix_tm_node_alloc();
1389 node->id = nonleaf_id;
1390 node->parent_id = parent;
1392 node->weight = NIX_TM_DFLT_RR_WT;
1393 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1395 node->tree = ROC_NIX_TM_RLIMIT;
1396 node->rel_chan = NIX_TM_CHAN_INVALID;
1398 rc = nix_tm_node_add(roc_nix, node);
1401 parent = nonleaf_id;
1405 /* SMQ is mapped to SCH4 when we have TL1 access and SCH3 otherwise */
1406 lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 : ROC_TM_LVL_SCH3);
1408 /* Add per queue SMQ nodes i.e SCH4 / SCH3 */
1409 for (i = 0; i < nix->nb_tx_queues; i++) {
1411 node = nix_tm_node_alloc();
1415 node->id = nonleaf_id + i;
1416 node->parent_id = parent;
1418 node->weight = NIX_TM_DFLT_RR_WT;
1419 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1421 node->tree = ROC_NIX_TM_RLIMIT;
1422 node->rel_chan = NIX_TM_CHAN_INVALID;
1424 rc = nix_tm_node_add(roc_nix, node);
1429 parent = nonleaf_id;
1430 leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
1433 /* Add leaf nodes */
1434 for (i = 0; i < nix->nb_tx_queues; i++) {
1436 node = nix_tm_node_alloc();
1441 node->parent_id = parent + i;
1443 node->weight = NIX_TM_DFLT_RR_WT;
1444 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1445 node->lvl = leaf_lvl;
1446 node->tree = ROC_NIX_TM_RLIMIT;
1447 node->rel_chan = NIX_TM_CHAN_INVALID;
1449 rc = nix_tm_node_add(roc_nix, node);
1456 nix_tm_node_free(node);
/* Build the PFC TM tree: shared ROOT and SCH1 (TL2) nodes, then — per
 * TX queue — a private SCH2..SCH3 chain under the TL2 node, a SCH4
 * node, and finally a leaf, giving each queue its own branch so PFC can
 * pause traffic classes independently.  Error path frees the node that
 * failed to add.
 */
1461 roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix)
1463 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1464 uint32_t nonleaf_id = nix->nb_tx_queues;
1465 struct nix_tm_node *node = NULL;
1466 uint8_t leaf_lvl, lvl, lvl_end;
1467 uint32_t tl2_node_id;
1471 parent = ROC_NIX_TM_NODE_ID_INVALID;
1472 lvl_end = ROC_TM_LVL_SCH3;
1473 leaf_lvl = ROC_TM_LVL_QUEUE;
/* Root node, shared by all queues. */
1476 node = nix_tm_node_alloc();
1480 node->id = nonleaf_id;
1481 node->parent_id = parent;
1483 node->weight = NIX_TM_DFLT_RR_WT;
1484 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1485 node->lvl = ROC_TM_LVL_ROOT;
1486 node->tree = ROC_NIX_TM_PFC;
1487 node->rel_chan = NIX_TM_CHAN_INVALID;
1489 rc = nix_tm_node_add(roc_nix, node);
1493 parent = nonleaf_id;
/* Single SCH1 (TL2) node; its id anchors the per-queue branches. */
1498 node = nix_tm_node_alloc();
1502 node->id = nonleaf_id;
1503 node->parent_id = parent;
1505 node->weight = NIX_TM_DFLT_RR_WT;
1506 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1507 node->lvl = ROC_TM_LVL_SCH1;
1508 node->tree = ROC_NIX_TM_PFC;
1509 node->rel_chan = NIX_TM_CHAN_INVALID;
1511 rc = nix_tm_node_add(roc_nix, node);
1515 tl2_node_id = nonleaf_id;
/* Per queue: SCH2..SCH3 chain, then SCH4, then leaf. */
1518 for (i = 0; i < nix->nb_tx_queues; i++) {
1519 parent = tl2_node_id;
1520 for (lvl = ROC_TM_LVL_SCH2; lvl <= lvl_end; lvl++) {
1522 node = nix_tm_node_alloc();
1526 node->id = nonleaf_id;
1527 node->parent_id = parent;
1529 node->weight = NIX_TM_DFLT_RR_WT;
1530 node->shaper_profile_id =
1531 ROC_NIX_TM_SHAPER_PROFILE_NONE;
1533 node->tree = ROC_NIX_TM_PFC;
1534 node->rel_chan = NIX_TM_CHAN_INVALID;
1536 rc = nix_tm_node_add(roc_nix, node);
1540 parent = nonleaf_id;
1544 lvl = ROC_TM_LVL_SCH4;
1547 node = nix_tm_node_alloc();
1551 node->id = nonleaf_id;
1552 node->parent_id = parent;
1554 node->weight = NIX_TM_DFLT_RR_WT;
1555 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1557 node->tree = ROC_NIX_TM_PFC;
1558 node->rel_chan = NIX_TM_CHAN_INVALID;
1560 rc = nix_tm_node_add(roc_nix, node);
1564 parent = nonleaf_id;
1568 node = nix_tm_node_alloc();
1573 node->parent_id = parent;
1575 node->weight = NIX_TM_DFLT_RR_WT;
1576 node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
1577 node->lvl = leaf_lvl;
1578 node->tree = ROC_NIX_TM_PFC;
1579 node->rel_chan = NIX_TM_CHAN_INVALID;
1581 rc = nix_tm_node_add(roc_nix, node);
1588 nix_tm_node_free(node);
/* Tear down every tree selected by 'tree_mask' (bit per tree).  First
 * pass releases HW resources of non-leaf nodes (clearing path XOFF so
 * frees succeed); when 'hw_only' is false a second pass also drops
 * shaper-profile references and frees the software node objects.
 * Safe-iteration via next_node because nodes are removed mid-walk.
 */
1593 nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
1595 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1596 struct nix_tm_shaper_profile *profile;
1597 struct nix_tm_node *node, *next_node;
1598 struct nix_tm_node_list *list;
1599 enum roc_nix_tm_tree tree;
1600 uint32_t profile_id;
1603 for (tree = 0; tree < ROC_NIX_TM_TREE_MAX; tree++) {
1604 if (!(tree_mask & BIT(tree)))
1607 plt_tm_dbg("Freeing resources of tree %u", tree);
1609 list = nix_tm_node_list(nix, tree);
1610 next_node = TAILQ_FIRST(list);
1613 next_node = TAILQ_NEXT(node, node);
1615 if (!nix_tm_is_leaf(nix, node->lvl) &&
1616 node->flags & NIX_TM_NODE_HWRES) {
1617 /* Clear xoff in path for flush to succeed */
1618 rc = nix_tm_clear_path_xoff(nix, node);
1621 rc = nix_tm_free_node_resource(nix, node);
1627 /* Leave software elements if needed */
1631 next_node = TAILQ_FIRST(list);
1634 next_node = TAILQ_NEXT(node, node);
1636 plt_tm_dbg("Free node lvl %u id %u (%p)", node->lvl,
1639 profile_id = node->shaper_profile_id;
1640 profile = nix_tm_shaper_profile_search(nix, profile_id);
1644 TAILQ_REMOVE(list, node, node);
1645 nix_tm_node_free(node);
/* One-time TM state init: size-check public node/profile structs, init
 * tree lists and the shaper profile list, set the 1 Gbps minimum rate,
 * allocate one zeroed slab carved into two bitmaps (discontiguous +
 * contiguous) per hw level, init packet marking, then set TL1 policy:
 * no TL1 static priority when VFs exist, TL1 access (root = TL1) for
 * PFs, root = TL2 otherwise.  Error path funnels to nix_tm_conf_fini().
 */
1652 nix_tm_conf_init(struct roc_nix *roc_nix)
1654 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1655 uint32_t bmp_sz, hw_lvl;
1659 PLT_STATIC_ASSERT(sizeof(struct nix_tm_node) <= ROC_NIX_TM_NODE_SZ);
1660 PLT_STATIC_ASSERT(sizeof(struct nix_tm_shaper_profile) <=
1661 ROC_NIX_TM_SHAPER_PROFILE_SZ);
1664 for (i = 0; i < ROC_NIX_TM_TREE_MAX; i++)
1665 TAILQ_INIT(&nix->trees[i]);
1667 TAILQ_INIT(&nix->shaper_profile_list);
1668 nix->tm_rate_min = 1E9; /* 1Gbps */
1671 bmp_sz = plt_bitmap_get_memory_footprint(NIX_TM_MAX_HW_TXSCHQ);
1672 bmp_mem = plt_zmalloc(bmp_sz * NIX_TXSCH_LVL_CNT * 2, 0);
1675 nix->schq_bmp_mem = bmp_mem;
1677 /* Init contiguous and discontiguous bitmap per lvl */
1679 for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
1680 /* Bitmap for discontiguous resource */
1681 nix->schq_bmp[hw_lvl] =
1682 plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
1683 if (!nix->schq_bmp[hw_lvl])
1686 bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
1688 /* Bitmap for contiguous resource */
1689 nix->schq_contig_bmp[hw_lvl] =
1690 plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
1691 if (!nix->schq_contig_bmp[hw_lvl])
1694 bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
1697 rc = nix_tm_mark_init(nix);
1701 /* Disable TL1 Static Priority when VF's are enabled
1702 * as otherwise VF's TL2 reallocation will be needed
1703 * runtime to support a specific topology of PF.
1705 if (nix->pci_dev->max_vfs)
1706 nix->tm_flags |= NIX_TM_TL1_NO_SP;
1708 /* TL1 access is only for PF's */
1709 if (roc_nix_is_pf(roc_nix)) {
1710 nix->tm_flags |= NIX_TM_TL1_ACCESS;
1711 nix->tm_root_lvl = NIX_TXSCH_LVL_TL1;
1713 nix->tm_root_lvl = NIX_TXSCH_LVL_TL2;
1718 nix_tm_conf_fini(roc_nix);
/* Undo nix_tm_conf_init(): free both bitmaps at every hw level, then
 * the single backing allocation (plt_free/plt_bitmap_free tolerate the
 * partially-initialized state reached from the init error path —
 * presumably NULL-safe; confirm in plt layer).
 */
1723 nix_tm_conf_fini(struct roc_nix *roc_nix)
1725 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1728 for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
1729 plt_bitmap_free(nix->schq_bmp[hw_lvl]);
1730 plt_bitmap_free(nix->schq_contig_bmp[hw_lvl]);
1732 plt_free(nix->schq_bmp_mem);