1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* Return the index of the lowest set bit in a 64-bit bitmap slab.
 * NOTE(review): __builtin_ctzll(0) is undefined behavior — callers are
 * presumably expected to pass a non-zero slab; confirm at call sites.
 * (Source view is elided; lines between the visible ones are missing.)
 */
9 bitmap_ctzll(uint64_t slab)
14 return __builtin_ctzll(slab);
/* Drain and free every entry on nix->shaper_profile_list.
 * Profiles that still have a non-zero ref_cnt are only warned about —
 * they are removed and freed regardless.
 */
18 nix_tm_clear_shaper_profiles(struct nix *nix)
20 struct nix_tm_shaper_profile *shaper_profile;
22 shaper_profile = TAILQ_FIRST(&nix->shaper_profile_list);
23 while (shaper_profile != NULL) {
24 if (shaper_profile->ref_cnt)
25 plt_warn("Shaper profile %u has non zero references",
27 TAILQ_REMOVE(&nix->shaper_profile_list, shaper_profile, shaper);
28 nix_tm_shaper_profile_free(shaper_profile);
/* Re-read list head — the list shrinks by one each iteration. */
29 shaper_profile = TAILQ_FIRST(&nix->shaper_profile_list);
/* Walk the node list of the given tree twice:
 * pass 1 frees child HW resources whose direct parent is flagged
 * child_realloc (hierarchy changed), pass 2 recomputes each parent's
 * round-robin group info (rr_prio/rr_num) and max child priority via
 * nix_tm_check_rr().
 */
34 nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree)
36 struct nix_tm_node *child, *parent;
37 struct nix_tm_node_list *list;
38 uint32_t rr_prio, max_prio;
41 list = nix_tm_node_list(nix, tree);
43 /* Release all the node hw resources locally
44 * if parent marked as dirty and resource exists.
46 TAILQ_FOREACH(child, list, node) {
47 /* Release resource only if parent direct hierarchy changed */
48 if (child->flags & NIX_TM_NODE_HWRES && child->parent &&
49 child->parent->child_realloc) {
50 nix_tm_free_node_resource(nix, child);
/* UINT32_MAX marks max_prio as "not yet computed". */
52 child->max_prio = UINT32_MAX;
55 TAILQ_FOREACH(parent, list, node) {
56 /* Count group of children of same priority i.e are RR */
57 rr_num = nix_tm_check_rr(nix, parent->id, tree, &rr_prio,
60 /* Assuming that multiple RR groups are
61 * not configured based on capability.
63 parent->rr_prio = rr_prio;
64 parent->rr_num = rr_num;
65 parent->max_prio = max_prio;
/* Validate and insert a TM node into nix->trees[tree].
 * Validation visible here: tree range, sw->hw level translation, leaf
 * priority must be 0, expected parent level, duplicate node id, shaper
 * profile existence and pkt_mode match for non-leaf nodes, sibling
 * priority/DWRR consistency, and max scheduling weight.
 * Returns 0 on success or a NIX_ERR_TM_* error code.
 * (Source view is elided; some statements are not visible.)
 */
72 nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
74 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
75 struct nix_tm_shaper_profile *profile;
76 uint32_t node_id, parent_id, lvl;
77 struct nix_tm_node *parent_node;
78 uint32_t priority, profile_id;
79 uint8_t hw_lvl, exp_next_lvl;
80 enum roc_nix_tm_tree tree;
84 priority = node->priority;
85 parent_id = node->parent_id;
86 profile_id = node->shaper_profile_id;
90 plt_tm_dbg("Add node %s lvl %u id %u, prio 0x%x weight 0x%x "
91 "parent %u profile 0x%x tree %u",
92 nix_tm_hwlvl2str(nix_tm_lvl2nix(nix, lvl)), lvl, node_id,
93 priority, node->weight, parent_id, profile_id, tree);
95 if (tree >= ROC_NIX_TM_TREE_MAX)
98 /* Translate sw level id's to nix hw level id's */
99 hw_lvl = nix_tm_lvl2nix(nix, lvl)
100 if (hw_lvl == NIX_TXSCH_LVL_CNT && !nix_tm_is_leaf(nix, lvl))
101 return NIX_ERR_TM_INVALID_LVL;
103 /* Leaf nodes have to be same priority */
104 if (nix_tm_is_leaf(nix, lvl) && priority != 0)
105 return NIX_ERR_TM_INVALID_PRIO;
107 parent_node = nix_tm_node_search(nix, parent_id, tree);
/* Leaf (SQ) nodes are expected to hang off an SMQ parent. */
109 if (node_id < nix->nb_tx_queues)
110 exp_next_lvl = NIX_TXSCH_LVL_SMQ;
112 exp_next_lvl = hw_lvl + 1;
114 /* Check if there is no parent node yet */
115 if (hw_lvl != nix->tm_root_lvl &&
116 (!parent_node || parent_node->hw_lvl != exp_next_lvl))
117 return NIX_ERR_TM_INVALID_PARENT;
119 /* Check if a node already exists */
120 if (nix_tm_node_search(nix, node_id, tree))
121 return NIX_ERR_TM_NODE_EXISTS;
123 profile = nix_tm_shaper_profile_search(nix, profile_id);
124 if (!nix_tm_is_leaf(nix, lvl)) {
125 /* Check if shaper profile exists for non leaf node */
126 if (!profile && profile_id != ROC_NIX_TM_SHAPER_PROFILE_NONE)
127 return NIX_ERR_TM_INVALID_SHAPER_PROFILE;
129 /* Packet mode in profile should match with that of tm node */
130 if (profile && profile->pkt_mode != node->pkt_mode)
131 return NIX_ERR_TM_PKT_MODE_MISMATCH;
134 /* Check if there is second DWRR already in siblings or holes in prio */
135 rc = nix_tm_validate_prio(nix, lvl, parent_id, priority, tree);
139 if (node->weight > ROC_NIX_TM_MAX_SCHED_WT)
140 return NIX_ERR_TM_WEIGHT_EXCEED;
142 /* Maintain minimum weight */
146 node->hw_lvl = nix_tm_lvl2nix(nix, lvl);
/* HW ids are assigned later (nix_tm_assign_resources); mark invalid. */
148 node->max_prio = UINT32_MAX;
149 node->hw_id = NIX_TM_HW_ID_INVALID;
155 node->parent = parent_node;
/* New child forces the parent's children to be reallocated. */
157 parent_node->child_realloc = true;
158 node->parent_hw_id = NIX_TM_HW_ID_INVALID;
160 TAILQ_INSERT_TAIL(&nix->trees[tree], node, node);
161 plt_tm_dbg("Added node %s lvl %u id %u (%p)",
162 nix_tm_hwlvl2str(node->hw_lvl), lvl, node_id, node);
/* Re-enable (clear SW XOFF on) every disabled-but-allocated node on the
 * path of the given node, via txschq config mailbox messages, so that a
 * subsequent flush can make progress.
 * NOTE(review): the loop header iterating `p` over the path is not
 * visible in this elided view.
 */
167 nix_tm_clear_path_xoff(struct nix *nix, struct nix_tm_node *node)
169 struct mbox *mbox = (&nix->dev)->mbox;
170 struct nix_txschq_config *req;
171 struct nix_tm_node *p;
174 /* Enable nodes in path for flush to succeed */
175 if (!nix_tm_is_leaf(nix, node->lvl))
/* Only nodes that hold HW resources but are currently disabled
 * need an un-xoff request.
 */
180 if (!(p->flags & NIX_TM_NODE_ENABLED) &&
181 (p->flags & NIX_TM_NODE_HWRES)) {
182 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
183 req->lvl = p->hw_lvl;
184 req->num_regs = nix_tm_sw_xoff_prep(p, false, req->reg,
186 rc = mbox_process(mbox);
190 p->flags |= NIX_TM_NODE_ENABLED;
/* Set or clear XOFF/FLUSH on an SMQ via NIX_AF_SMQX_CFG.
 * First clears SW XOFF along the node's path so the flush can drain.
 * NOTE(review): BIT_ULL(50)/BIT_ULL(49) are presumably the SMQ flush
 * and stop bits of NIX_AF_SMQX_CFG — confirm against the hardware
 * reference manual.
 */
199 nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable)
201 struct mbox *mbox = (&nix->dev)->mbox;
202 struct nix_txschq_config *req;
207 plt_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
208 enable ? "enable" : "disable");
210 rc = nix_tm_clear_path_xoff(nix, node);
214 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
215 req->lvl = NIX_TXSCH_LVL_SMQ;
218 req->reg[0] = NIX_AF_SMQX_CFG(smq);
219 req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
/* Mask selects which bits the AF updates; on disable only bit 50
 * is cleared, leaving bit 49 untouched.
 */
220 req->regval_mask[0] =
221 enable ? ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);
223 return mbox_process(mbox);
/* Look up the leaf TM node for SQ `sq` in the active tree and return
 * its parent SMQ hw id and RR quantum (derived from node weight)
 * through the out parameters. Also clears SMQ xoff on the parent and
 * marks the leaf enabled.
 */
227 nix_tm_leaf_data_get(struct nix *nix, uint16_t sq, uint32_t *rr_quantum,
230 struct nix_tm_node *node;
233 node = nix_tm_node_search(nix, sq, nix->tm_tree);
235 /* Check if we found a valid leaf node */
236 if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
237 node->parent->hw_id == NIX_TM_HW_ID_INVALID) {
241 /* Get SMQ Id of leaf node's parent */
242 *smq = node->parent->hw_id;
243 *rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
/* Un-xoff the parent SMQ so the SQ can transmit. */
245 rc = nix_tm_smq_xoff(nix, node->parent, false);
248 node->flags |= NIX_TM_NODE_ENABLED;
/* Spin-wait for an SQ to reach quiescent state (drained).
 * The timeout budget scales with descriptor count, max packet length,
 * queue count and the minimum shaper rate (worst case: this SQ is the
 * lowest priority and must wait for all others to drain first).
 * Quiescence is detected from NIX_LF_SQ_OP_STATUS: at most one SQB in
 * use, head == tail, and the flow-control memory reporting all SQB
 * buffers returned.
 */
253 roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
255 struct nix *nix = roc_nix_to_nix_priv(sq->roc_nix);
256 uint16_t sqb_cnt, head_off, tail_off;
257 uint64_t wdata, val, prev;
258 uint16_t qid = sq->qid;
260 uint64_t timeout; /* 10's of usec */
262 /* Wait for enough time based on shaper min rate */
263 timeout = (sq->nb_desc * roc_nix_max_pkt_len(sq->roc_nix) * 8 * 1E5);
264 /* Wait for worst case scenario of this SQ being last priority
265 * and so have to wait for all other SQ's drain out by their own.
267 timeout = timeout * nix->nb_tx_queues;
268 timeout = timeout / nix->tm_rate_min;
/* Atomic LMT read of SQ_OP_STATUS: qid is encoded in bits [63:32]
 * of the written data.
 */
272 wdata = ((uint64_t)qid << 32);
273 regaddr = (int64_t *)(nix->base + NIX_LF_SQ_OP_STATUS);
274 val = roc_atomic64_add_nosync(wdata, regaddr);
276 /* Spin multiple iterations as "sq->fc_cache_pkts" can still
277 * have space to send pkts even though fc_mem is disabled
283 val = roc_atomic64_add_nosync(wdata, regaddr);
284 /* Continue on error */
285 if (val & BIT_ULL(63))
/* Decode status: SQB count and ring head/tail offsets. */
291 sqb_cnt = val & 0xFFFF;
292 head_off = (val >> 20) & 0x3F;
293 tail_off = (val >> 28) & 0x3F;
295 /* SQ reached quiescent state */
296 if (sqb_cnt <= 1 && head_off == tail_off &&
297 (*(volatile uint64_t *)sq->fc == sq->nb_sqb_bufs)) {
/* On timeout, dump queue context for diagnostics. */
309 roc_nix_queues_ctx_dump(sq->roc_nix);
313 /* Flush and disable tx queue and its parent SMQ */
315 nix_tm_sq_flush_pre(struct roc_nix_sq *sq)
317 struct roc_nix *roc_nix = sq->roc_nix;
318 struct nix_tm_node *node, *sibling;
319 struct nix_tm_node_list *list;
320 enum roc_nix_tm_tree tree;
326 nix = roc_nix_to_nix_priv(roc_nix);
328 /* Need not do anything if tree is in disabled state */
329 if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
332 mbox = (&nix->dev)->mbox;
336 list = nix_tm_node_list(nix, tree);
338 /* Find the node for this SQ */
339 node = nix_tm_node_search(nix, qid, tree);
340 if (!node || !(node->flags & NIX_TM_NODE_ENABLED)) {
341 plt_err("Invalid node/state for sq %u", qid);
345 /* Enable CGX RXTX to drain pkts */
346 if (!roc_nix->io_enabled) {
347 /* Though it enables both RX MCAM Entries and CGX Link
348 * we assume all the rx queues are stopped way back.
350 mbox_alloc_msg_nix_lf_start_rx(mbox);
351 rc = mbox_process(mbox);
353 plt_err("cgx start failed, rc=%d", rc);
358 /* Disable smq xoff for case it was enabled earlier */
359 rc = nix_tm_smq_xoff(nix, node->parent, false);
361 plt_err("Failed to enable smq %u, rc=%d", node->parent->hw_id,
366 /* As per HRM, to disable an SQ, all other SQ's
367 * that feed to same SMQ must be paused before SMQ flush.
369 TAILQ_FOREACH(sibling, list, node) {
370 if (sibling->parent != node->parent)
372 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
/* Stop SQB-aura flow control so the SQ can fully drain. */
380 rc = roc_nix_tm_sq_aura_fc(sq, false);
382 plt_err("Failed to disable sqb aura fc, rc=%d", rc);
386 /* Wait for sq entries to be flushed */
387 rc = roc_nix_tm_sq_flush_spin(sq);
389 plt_err("Failed to drain sq %u, rc=%d\n", sq->qid, rc);
394 node->flags &= ~NIX_TM_NODE_ENABLED;
396 /* Disable and flush */
397 rc = nix_tm_smq_xoff(nix, node->parent, true);
399 plt_err("Failed to disable smq %u, rc=%d", node->parent->hw_id,
404 /* Restore cgx state */
405 if (!roc_nix->io_enabled) {
406 mbox_alloc_msg_nix_lf_stop_rx(mbox);
/* Keep any earlier error code while still stopping RX. */
407 rc |= mbox_process(mbox);
/* Counterpart of nix_tm_sq_flush_pre(): after an SQ flush, re-enable
 * the sibling SQs feeding the same SMQ, clear SMQ xoff if any sibling
 * is still active, and restore SQB-aura flow control.
 */
414 nix_tm_sq_flush_post(struct roc_nix_sq *sq)
416 struct roc_nix *roc_nix = sq->roc_nix;
417 struct nix_tm_node *node, *sibling;
418 struct nix_tm_node_list *list;
419 enum roc_nix_tm_tree tree;
420 struct roc_nix_sq *s_sq;
426 nix = roc_nix_to_nix_priv(roc_nix);
428 /* Need not do anything if tree is in disabled state */
429 if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
434 list = nix_tm_node_list(nix, tree);
436 /* Find the node for this SQ */
437 node = nix_tm_node_search(nix, qid, tree);
439 plt_err("Invalid node for sq %u", qid);
443 /* Enable all the siblings back */
444 TAILQ_FOREACH(sibling, list, node) {
445 if (sibling->parent != node->parent)
/* Skip the SQ being flushed itself. */
448 if (sibling->id == qid)
451 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
455 s_sq = nix->sqs[s_qid];
460 /* Enable back if any SQ is still present */
461 rc = nix_tm_smq_xoff(nix, node->parent, false);
463 plt_err("Failed to enable smq %u, rc=%d",
464 node->parent->hw_id, rc);
470 rc = roc_nix_tm_sq_aura_fc(s_sq, true);
472 plt_err("Failed to enable sqb aura fc, rc=%d", rc);
/* Release scheduler queues (schq) of one HW level back to the AF,
 * from either the contiguous or discontiguous bitmap, keeping up to
 * the reserved threshold. `to_free` stays 0 when availability is at
 * or below the threshold (only the surplus is released).
 */
481 nix_tm_release_resources(struct nix *nix, uint8_t hw_lvl, bool contig,
484 uint16_t avail, thresh, to_free = 0, schq;
485 struct mbox *mbox = (&nix->dev)->mbox;
486 struct nix_txsch_free_req *req;
487 struct plt_bitmap *bmp;
492 bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
494 contig ? nix->contig_rsvd[hw_lvl] : nix->discontig_rsvd[hw_lvl];
495 plt_bitmap_scan_init(bmp);
497 avail = nix_tm_resource_avail(nix, hw_lvl, contig);
500 /* Release only above threshold */
502 to_free = avail - thresh;
504 /* Release everything */
508 /* Now release resources to AF */
/* Refill `slab` from the bitmap scan when the current word is
 * exhausted; stop when the bitmap has no more set bits.
 */
510 if (!slab && !plt_bitmap_scan(bmp, &pos, &slab))
513 schq = bitmap_ctzll(slab);
514 slab &= ~(1ULL << schq);
518 req = mbox_alloc_msg_nix_txsch_free(mbox);
522 req->schq_lvl = hw_lvl;
524 rc = mbox_process(mbox);
526 plt_err("failed to release hwres %s(%u) rc %d",
527 nix_tm_hwlvl2str(hw_lvl), schq, rc);
531 plt_tm_dbg("Released hwres %s(%u)", nix_tm_hwlvl2str(hw_lvl),
533 plt_bitmap_clear(bmp, schq);
/* Reaching here with work remaining means bitmap and counters
 * disagree — report the inconsistency.
 */
538 plt_err("resource inconsistency for %s(%u)",
539 nix_tm_hwlvl2str(hw_lvl), contig);
/* Free the HW schq held by a single TM node. If the discontiguous pool
 * for that level is below its reserved count, the schq is returned to
 * the local bitmap instead of the AF; otherwise it is released to the
 * AF via a txsch_free mailbox request. Marks the parent dirty so its
 * children get reallocated.
 */
546 nix_tm_free_node_resource(struct nix *nix, struct nix_tm_node *node)
548 struct mbox *mbox = (&nix->dev)->mbox;
549 struct nix_txsch_free_req *req;
550 struct plt_bitmap *bmp;
551 uint16_t avail, hw_id;
555 hw_lvl = node->hw_lvl;
557 bmp = nix->schq_bmp[hw_lvl];
558 /* Free specific HW resource */
559 plt_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
560 nix_tm_hwlvl2str(node->hw_lvl), hw_id, node->lvl, node->id,
563 avail = nix_tm_resource_avail(nix, hw_lvl, false);
564 /* Always for now free to discontiguous queue when avail
567 if (nix->discontig_rsvd[hw_lvl] &&
568 avail < nix->discontig_rsvd[hw_lvl]) {
569 PLT_ASSERT(hw_id < NIX_TM_MAX_HW_TXSCHQ);
/* The schq must not already be marked free in the bitmap. */
570 PLT_ASSERT(plt_bitmap_get(bmp, hw_id) == 0);
571 plt_bitmap_set(bmp, hw_id);
572 node->hw_id = NIX_TM_HW_ID_INVALID;
573 node->flags &= ~NIX_TM_NODE_HWRES;
/* Otherwise release the schq back to the AF. */
578 req = mbox_alloc_msg_nix_txsch_free(mbox);
582 req->schq_lvl = node->hw_lvl;
584 rc = mbox_process(mbox);
586 plt_err("failed to release hwres %s(%u) rc %d",
587 nix_tm_hwlvl2str(node->hw_lvl), hw_id, rc);
591 /* Mark parent as dirty for reallocing it's children */
593 node->parent->child_realloc = true;
595 node->hw_id = NIX_TM_HW_ID_INVALID;
596 node->flags &= ~NIX_TM_NODE_HWRES;
597 plt_tm_dbg("Released hwres %s(%u) to af",
598 nix_tm_hwlvl2str(node->hw_lvl), hw_id);
/* Delete a TM node from the given tree.
 * Fails with NIX_ERR_TM_INVALID_NODE if not found and
 * NIX_ERR_TM_CHILD_EXISTS if any child still references it. Releases
 * the node's HW resource (if held), drops its shaper-profile
 * reference, unlinks it from the list, and frees the node memory only
 * when `free` is true.
 */
603 nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
604 enum roc_nix_tm_tree tree, bool free)
606 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
607 struct nix_tm_shaper_profile *profile;
608 struct nix_tm_node *node, *child;
609 struct nix_tm_node_list *list;
613 plt_tm_dbg("Delete node id %u tree %u", node_id, tree);
615 node = nix_tm_node_search(nix, node_id, tree);
617 return NIX_ERR_TM_INVALID_NODE;
619 list = nix_tm_node_list(nix, tree);
620 /* Check for any existing children */
621 TAILQ_FOREACH(child, list, node) {
622 if (child->parent == node)
623 return NIX_ERR_TM_CHILD_EXISTS;
626 /* Remove shaper profile reference */
627 profile_id = node->shaper_profile_id;
628 profile = nix_tm_shaper_profile_search(nix, profile_id);
630 /* Free hw resource locally */
631 if (node->flags & NIX_TM_NODE_HWRES) {
632 rc = nix_tm_free_node_resource(nix, node);
640 TAILQ_REMOVE(list, node, node);
642 plt_tm_dbg("Deleted node %s lvl %u id %u, prio 0x%x weight 0x%x "
643 "parent %u profile 0x%x tree %u (%p)",
644 nix_tm_hwlvl2str(node->hw_lvl), node->lvl, node->id,
645 node->priority, node->weight,
646 node->parent ? node->parent->id : UINT32_MAX,
647 node->shaper_profile_id, tree, node);
648 /* Free only if requested */
650 nix_tm_node_free(node);
/* Assign HW schq ids to all children of `parent` one level below it.
 * Children at the parent's RR priority take the spare contiguous schq
 * first (if reserved), then discontiguous schqs from the level bitmap;
 * all other children take contiguous ids offset by their priority from
 * *contig_id. *contig_cnt is decremented per contiguous id consumed.
 */
655 nix_tm_assign_hw_id(struct nix *nix, struct nix_tm_node *parent,
656 uint16_t *contig_id, int *contig_cnt,
657 struct nix_tm_node_list *list)
659 struct nix_tm_node *child;
660 struct plt_bitmap *bmp;
661 uint8_t child_hw_lvl;
667 child_hw_lvl = parent->hw_lvl - 1;
668 bmp = nix->schq_bmp[child_hw_lvl];
669 plt_bitmap_scan_init(bmp);
672 /* Save spare schq if it is case of RR + SP */
673 if (parent->rr_prio != 0xf && *contig_cnt > 1)
674 spare_schq = *contig_id + parent->rr_prio;
676 TAILQ_FOREACH(child, list, node) {
/* Only handle direct children of this parent. */
679 if (child->parent->id != parent->id)
682 /* Resource never expected to be present */
683 if (child->flags & NIX_TM_NODE_HWRES) {
684 plt_err("Resource exists for child (%s)%u, id %u (%p)",
685 nix_tm_hwlvl2str(child->hw_lvl), child->hw_id,
691 plt_bitmap_scan(bmp, &pos, &slab);
693 if (child->priority == parent->rr_prio && spare_schq != -1) {
694 /* Use spare schq first if present */
697 *contig_cnt = *contig_cnt - 1;
699 } else if (child->priority == parent->rr_prio) {
700 /* Assign a discontiguous queue */
702 plt_err("Schq not found for Child %u "
704 child->id, child->lvl, child);
708 schq = bitmap_ctzll(slab);
709 slab &= ~(1ULL << schq);
711 plt_bitmap_clear(bmp, schq);
713 /* Assign a contiguous queue */
714 schq = *contig_id + child->priority;
715 *contig_cnt = *contig_cnt - 1;
718 plt_tm_dbg("Resource %s(%u), for lvl %u id %u(%p)",
719 nix_tm_hwlvl2str(child->hw_lvl), schq, child->lvl,
723 child->parent_hw_id = parent->hw_id;
724 child->flags |= NIX_TM_NODE_HWRES;
/* Assign HW schq resources to an entire tree, walking parent levels
 * top-down (TL1 toward SMQ). For each dirty parent, a contiguous run of
 * (max_prio + 1) schqs is carved from the contiguous bitmap and handed
 * to nix_tm_assign_hw_id() for the children; clean parents are only
 * sanity-checked. Finally the root node (and TL1 parent id when the
 * function lacks TL1 access) is assigned from the discontiguous bitmaps.
 */
731 nix_tm_assign_resources(struct nix *nix, enum roc_nix_tm_tree tree)
733 struct nix_tm_node *parent, *root = NULL;
734 struct plt_bitmap *bmp, *bmp_contig;
735 struct nix_tm_node_list *list;
736 uint8_t child_hw_lvl, hw_lvl;
737 uint16_t contig_id, j;
742 list = nix_tm_node_list(nix, tree);
743 /* Walk from TL1 to TL4 parents */
744 for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
745 TAILQ_FOREACH(parent, list, node) {
746 child_hw_lvl = parent->hw_lvl - 1;
747 if (parent->hw_lvl != hw_lvl)
750 /* Remember root for future */
751 if (parent->hw_lvl == nix->tm_root_lvl)
754 if (!parent->child_realloc) {
755 /* Skip when parent is not dirty */
756 if (nix_tm_child_res_valid(list, parent))
758 plt_err("Parent not dirty but invalid "
759 "child res parent id %u(lvl %u)",
760 parent->id, parent->lvl);
764 bmp_contig = nix->schq_contig_bmp[child_hw_lvl];
766 /* Prealloc contiguous indices for a parent */
767 contig_id = NIX_TM_MAX_HW_TXSCHQ;
/* One contiguous schq per priority level 0..max_prio. */
768 cnt = (int)parent->max_prio + 1;
770 plt_bitmap_scan_init(bmp_contig);
771 if (!plt_bitmap_scan(bmp_contig, &pos, &slab)) {
772 plt_err("Contig schq not found");
775 contig_id = pos + bitmap_ctzll(slab);
777 /* Check if we have enough */
778 for (j = contig_id; j < contig_id + cnt; j++) {
779 if (!plt_bitmap_get(bmp_contig, j))
783 if (j != contig_id + cnt) {
784 plt_err("Contig schq not sufficient");
788 for (j = contig_id; j < contig_id + cnt; j++)
789 plt_bitmap_clear(bmp_contig, j);
792 /* Assign hw id to all children */
793 rc = nix_tm_assign_hw_id(nix, parent, &contig_id, &cnt,
796 plt_err("Unexpected err, contig res alloc, "
797 "parent %u, of %s, rc=%d, cnt=%d",
798 parent->id, nix_tm_hwlvl2str(hw_lvl),
803 /* Clear the dirty bit as children's
804 * resources are reallocated.
806 parent->child_realloc = false;
810 /* Root is always expected to be there */
/* Skip root assignment when it already holds a HW resource. */
814 if (root->flags & NIX_TM_NODE_HWRES)
817 /* Process root node */
818 bmp = nix->schq_bmp[nix->tm_root_lvl];
819 plt_bitmap_scan_init(bmp);
820 if (!plt_bitmap_scan(bmp, &pos, &slab)) {
821 plt_err("Resource not allocated for root");
825 root->hw_id = pos + bitmap_ctzll(slab);
826 root->flags |= NIX_TM_NODE_HWRES;
827 plt_bitmap_clear(bmp, root->hw_id);
829 /* Get TL1 id as well when root is not TL1 */
830 if (!nix_tm_have_tl1_access(nix)) {
831 bmp = nix->schq_bmp[NIX_TXSCH_LVL_TL1];
833 plt_bitmap_scan_init(bmp);
834 if (!plt_bitmap_scan(bmp, &pos, &slab)) {
835 plt_err("Resource not found for TL1");
838 root->parent_hw_id = pos + bitmap_ctzll(slab);
839 plt_bitmap_clear(bmp, root->parent_hw_id);
842 plt_tm_dbg("Resource %s(%u) for root(id %u) (%p)",
843 nix_tm_hwlvl2str(root->hw_lvl), root->hw_id, root->id, root);
/* Record the schqs granted by a txsch_alloc response into the per-level
 * discontiguous and contiguous bitmaps (a set bit = schq available).
 */
849 nix_tm_copy_rsp_to_nix(struct nix *nix, struct nix_txsch_alloc_rsp *rsp)
854 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
855 for (i = 0; i < rsp->schq[lvl]; i++)
856 plt_bitmap_set(nix->schq_bmp[lvl],
857 rsp->schq_list[lvl][i]);
859 for (i = 0; i < rsp->schq_contig[lvl]; i++)
860 plt_bitmap_set(nix->schq_contig_bmp[lvl],
861 rsp->schq_contig_list[lvl][i]);
/* Estimate and allocate scheduler queues from the AF for a tree.
 * Existing contiguous resources are released first (continuity with a
 * new allocation cannot be guaranteed), then allocation requests are
 * issued in chunks of at most MAX_TXSCHQ_PER_FUNC per level until the
 * estimated need is met. On failure, both contiguous and discontiguous
 * resources are released again.
 */
866 nix_tm_alloc_txschq(struct nix *nix, enum roc_nix_tm_tree tree)
868 uint16_t schq_contig[NIX_TXSCH_LVL_CNT];
869 struct mbox *mbox = (&nix->dev)->mbox;
870 uint16_t schq[NIX_TXSCH_LVL_CNT];
871 struct nix_txsch_alloc_req *req;
872 struct nix_txsch_alloc_rsp *rsp;
877 memset(schq, 0, sizeof(schq));
878 memset(schq_contig, 0, sizeof(schq_contig));
880 /* Estimate requirement */
881 rc = nix_tm_resource_estimate(nix, schq_contig, schq, tree);
885 /* Release existing contiguous resources when realloc requested
886 * as there is no way to guarantee continuity of old with new.
888 for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
889 if (schq_contig[hw_lvl])
890 nix_tm_release_resources(nix, hw_lvl, true, false);
893 /* Alloc as needed */
896 req = mbox_alloc_msg_nix_txsch_alloc(mbox);
901 mbox_memcpy(req->schq, schq, sizeof(req->schq));
902 mbox_memcpy(req->schq_contig, schq_contig,
903 sizeof(req->schq_contig));
905 /* Each alloc can be at max of MAX_TXSCHQ_PER_FUNC per level.
906 * So split alloc to multiple requests.
908 for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
909 if (req->schq[i] > MAX_TXSCHQ_PER_FUNC)
910 req->schq[i] = MAX_TXSCHQ_PER_FUNC;
/* Track remaining need per level for the next request. */
911 schq[i] -= req->schq[i];
913 if (req->schq_contig[i] > MAX_TXSCHQ_PER_FUNC)
914 req->schq_contig[i] = MAX_TXSCHQ_PER_FUNC;
915 schq_contig[i] -= req->schq_contig[i];
917 if (schq[i] || schq_contig[i])
921 rc = mbox_process_msg(mbox, (void *)&rsp);
925 nix_tm_copy_rsp_to_nix(nix, rsp);
928 nix->tm_link_cfg_lvl = rsp->link_cfg_lvl;
/* Error path: best-effort release of everything obtained so far. */
931 for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
932 if (nix_tm_release_resources(nix, i, true, false))
933 plt_err("Failed to release contig resources of "
936 if (nix_tm_release_resources(nix, i, false, false))
937 plt_err("Failed to release discontig resources of "
/* Free TM resources of every tree selected by tree_mask.
 * Pass 1 releases HW resources of non-leaf nodes (clearing path xoff
 * first so flush succeeds); pass 2 — skipped when hw_only is set —
 * drops shaper-profile references, unlinks and frees the SW nodes.
 */
945 nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
947 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
948 struct nix_tm_shaper_profile *profile;
949 struct nix_tm_node *node, *next_node;
950 struct nix_tm_node_list *list;
951 enum roc_nix_tm_tree tree;
955 for (tree = 0; tree < ROC_NIX_TM_TREE_MAX; tree++) {
956 if (!(tree_mask & BIT(tree)))
959 plt_tm_dbg("Freeing resources of tree %u", tree);
961 list = nix_tm_node_list(nix, tree);
962 next_node = TAILQ_FIRST(list);
/* next_node is captured before the body may unlink `node`. */
965 next_node = TAILQ_NEXT(node, node);
967 if (!nix_tm_is_leaf(nix, node->lvl) &&
968 node->flags & NIX_TM_NODE_HWRES) {
969 /* Clear xoff in path for flush to succeed */
970 rc = nix_tm_clear_path_xoff(nix, node);
973 rc = nix_tm_free_node_resource(nix, node);
979 /* Leave software elements if needed */
983 next_node = TAILQ_FIRST(list);
986 next_node = TAILQ_NEXT(node, node);
988 plt_tm_dbg("Free node lvl %u id %u (%p)", node->lvl,
991 profile_id = node->shaper_profile_id;
992 profile = nix_tm_shaper_profile_search(nix, profile_id);
996 TAILQ_REMOVE(list, node, node);
997 nix_tm_node_free(node);
/* One-time TM state initialization for a nix LF: tree and shaper
 * profile lists, per-level contiguous/discontiguous schq bitmaps (one
 * shared zmalloc'd backing buffer, 2 bitmaps per level), minimum shaper
 * rate, and TL1 access/root-level flags. On bitmap init failure, falls
 * through to nix_tm_conf_fini() for cleanup.
 */
1004 nix_tm_conf_init(struct roc_nix *roc_nix)
1006 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1007 uint32_t bmp_sz, hw_lvl;
/* Public opaque sizes must be able to hold the private structs. */
1011 PLT_STATIC_ASSERT(sizeof(struct nix_tm_node) <= ROC_NIX_TM_NODE_SZ);
1012 PLT_STATIC_ASSERT(sizeof(struct nix_tm_shaper_profile) <=
1013 ROC_NIX_TM_SHAPER_PROFILE_SZ);
1016 for (i = 0; i < ROC_NIX_TM_TREE_MAX; i++)
1017 TAILQ_INIT(&nix->trees[i]);
1019 TAILQ_INIT(&nix->shaper_profile_list);
1020 nix->tm_rate_min = 1E9; /* 1Gbps */
1023 bmp_sz = plt_bitmap_get_memory_footprint(NIX_TM_MAX_HW_TXSCHQ);
/* Two bitmaps (contig + discontig) per HW level share one buffer. */
1024 bmp_mem = plt_zmalloc(bmp_sz * NIX_TXSCH_LVL_CNT * 2, 0);
1027 nix->schq_bmp_mem = bmp_mem;
1029 /* Init contiguous and discontiguous bitmap per lvl */
1031 for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
1032 /* Bitmap for discontiguous resource */
1033 nix->schq_bmp[hw_lvl] =
1034 plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
1035 if (!nix->schq_bmp[hw_lvl])
1038 bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
1040 /* Bitmap for contiguous resource */
1041 nix->schq_contig_bmp[hw_lvl] =
1042 plt_bitmap_init(NIX_TM_MAX_HW_TXSCHQ, bmp_mem, bmp_sz);
1043 if (!nix->schq_contig_bmp[hw_lvl])
1046 bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
1049 /* Disable TL1 Static Priority when VF's are enabled
1050 * as otherwise VF's TL2 reallocation will be needed
1051 * runtime to support a specific topology of PF.
1053 if (nix->pci_dev->max_vfs)
1054 nix->tm_flags |= NIX_TM_TL1_NO_SP;
1056 /* TL1 access is only for PF's */
1057 if (roc_nix_is_pf(roc_nix)) {
1058 nix->tm_flags |= NIX_TM_TL1_ACCESS;
1059 nix->tm_root_lvl = NIX_TXSCH_LVL_TL1;
/* VFs root at TL2 since TL1 is PF-only. */
1061 nix->tm_root_lvl = NIX_TXSCH_LVL_TL2;
/* Error path: undo partial init. */
1066 nix_tm_conf_fini(roc_nix);
/* Tear down TM state created by nix_tm_conf_init(): free the per-level
 * bitmaps and the shared bitmap backing memory. Safe on partial init —
 * plt_bitmap_free/plt_free of NULL entries is presumably a no-op
 * (confirm against plt API).
 */
1071 nix_tm_conf_fini(struct roc_nix *roc_nix)
1073 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1076 for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
1077 plt_bitmap_free(nix->schq_bmp[hw_lvl]);
1078 plt_bitmap_free(nix->schq_contig_bmp[hw_lvl]);
1080 plt_free(nix->schq_bmp_mem);