1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* Pack a shaper's burst/rate parameters into the NIX PIR/CIR register
 * layout: burst exponent/mantissa in the upper bits, rate div_exp,
 * exponent and mantissa in the lower bits. Bit 0 is left clear here;
 * callers visibly OR in 1 to enable the shaper (see shaper_reg_prep).
 * NOTE(review): return type and braces are elided in this listing.
 */
9 nix_tm_shaper2regval(struct nix_tm_shaper_data *shaper)
11 return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
12 (shaper->div_exp << 13) | (shaper->exponent << 9) |
13 (shaper->mantissa << 1);
/* Map a user tree level to a NIX TXSCH hardware level when the device
 * root is TL1. Visible returns step TL1 -> TL2 -> TL3 -> TL4 -> SMQ,
 * with NIX_TXSCH_LVL_CNT as the "invalid level" fall-through.
 * NOTE(review): the per-level if/else conditions are elided here —
 * presumably comparisons of lvl against ROC TM level enums; confirm
 * against the full source.
 */
17 nix_tm_lvl2nix_tl1_root(uint32_t lvl)
21 return NIX_TXSCH_LVL_TL1;
23 return NIX_TXSCH_LVL_TL2;
25 return NIX_TXSCH_LVL_TL3;
27 return NIX_TXSCH_LVL_TL4;
29 return NIX_TXSCH_LVL_SMQ;
31 return NIX_TXSCH_LVL_CNT;
/* Map a user tree level to a NIX TXSCH hardware level when the device
 * root is TL2 (no TL1 access, e.g. VF). One hardware level shallower
 * than the TL1-root variant: TL2 -> TL3 -> TL4 -> SMQ, then
 * NIX_TXSCH_LVL_CNT for invalid levels.
 * NOTE(review): the if/else conditions are elided in this listing.
 */
36 nix_tm_lvl2nix_tl2_root(uint32_t lvl)
40 return NIX_TXSCH_LVL_TL2;
42 return NIX_TXSCH_LVL_TL3;
44 return NIX_TXSCH_LVL_TL4;
46 return NIX_TXSCH_LVL_SMQ;
48 return NIX_TXSCH_LVL_CNT;
/* Dispatch user-level -> hardware-level translation based on whether
 * this function (PF with TL1 access vs. VF) may program TL1.
 */
53 nix_tm_lvl2nix(struct nix *nix, uint32_t lvl)
55 if (nix_tm_have_tl1_access(nix))
56 return nix_tm_lvl2nix_tl1_root(lvl);
58 return nix_tm_lvl2nix_tl2_root(lvl);
/* Relative channel = low 8 bits of the TX channel base; used for
 * TL3/TL2 link configuration below.
 */
62 nix_tm_relchan_get(struct nix *nix)
64 return nix->tx_chan_base & 0xff;
/* Find the "prio anchor" for a parent node: walk the tree's node list
 * for children of node_id and derive the hw_id the parent's priority-0
 * child would have (child hw_id minus its priority). Children without
 * a parent, or sitting at the parent's RR priority, are skipped.
 * NOTE(review): the continue statements after each guard and the
 * no-children-found return are elided in this listing.
 */
68 nix_tm_find_prio_anchor(struct nix *nix, uint32_t node_id,
69 enum roc_nix_tm_tree tree)
71 struct nix_tm_node *child_node;
72 struct nix_tm_node_list *list;
74 list = nix_tm_node_list(nix, tree);
76 TAILQ_FOREACH(child_node, list, node) {
77 if (!child_node->parent)
79 if (!(child_node->parent->id == node_id))
81 if (child_node->priority == child_node->parent->rr_prio)
83 return child_node->hw_id - child_node->priority;
/* Linear search of the nix shaper-profile list by profile id.
 * NOTE(review): the return-on-match and the not-found return (NULL,
 * presumably) are elided in this listing.
 */
88 struct nix_tm_shaper_profile *
89 nix_tm_shaper_profile_search(struct nix *nix, uint32_t id)
91 struct nix_tm_shaper_profile *profile;
93 TAILQ_FOREACH(profile, &nix->shaper_profile_list, shaper) {
94 if (profile->id == id)
/* Linear search of the given tree's node list by node id.
 * NOTE(review): the return-on-match and not-found return are elided
 * in this listing.
 */
101 nix_tm_node_search(struct nix *nix, uint32_t node_id, enum roc_nix_tm_tree tree)
103 struct nix_tm_node_list *list;
104 struct nix_tm_node *node;
106 list = nix_tm_node_list(nix, tree);
107 TAILQ_FOREACH(node, list, node) {
108 if (node->id == node_id)
/* Convert a shaper rate (presumably bits/sec — confirm against
 * callers) into the hardware's (exponent, mantissa, div_exp) encoding.
 * Two regimes: rates at/below the base constant use div_exp + mantissa
 * (division), higher rates use exponent + mantissa (multiplication).
 * Outputs are written through the *_p pointers; the achievable rate,
 * per NIX_TM_SHAPER_RATE(), is the return value. Returns 0 (apparently
 * — elided) on out-of-range input.
 * NOTE(review): loop bodies (decrement statements), initializations of
 * div_exp/exponent per regime, and the early-return on the bounds
 * check are elided in this listing.
 */
115 nix_tm_shaper_rate_conv(uint64_t value, uint64_t *exponent_p,
116 uint64_t *mantissa_p, uint64_t *div_exp_p)
118 uint64_t div_exp, exponent, mantissa;
120 /* Boundary checks */
121 if (value < NIX_TM_MIN_SHAPER_RATE || value > NIX_TM_MAX_SHAPER_RATE)
124 if (value <= NIX_TM_SHAPER_RATE(0, 0, 0)) {
125 /* Calculate rate div_exp and mantissa using
126 * the following formula:
128 * value = (2E6 * (256 + mantissa)
129 * / ((1 << div_exp) * 256))
133 mantissa = NIX_TM_MAX_RATE_MANTISSA;
135 while (value < (NIX_TM_SHAPER_RATE_CONST / (1 << div_exp)))
138 while (value < ((NIX_TM_SHAPER_RATE_CONST * (256 + mantissa)) /
139 ((1 << div_exp) * 256)))
142 /* Calculate rate exponent and mantissa using
143 * the following formula:
145 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
149 exponent = NIX_TM_MAX_RATE_EXPONENT;
150 mantissa = NIX_TM_MAX_RATE_MANTISSA;
152 while (value < (NIX_TM_SHAPER_RATE_CONST * (1 << exponent)))
155 while (value < ((NIX_TM_SHAPER_RATE_CONST *
156 ((256 + mantissa) << exponent)) /
161 if (div_exp > NIX_TM_MAX_RATE_DIV_EXP ||
162 exponent > NIX_TM_MAX_RATE_EXPONENT ||
163 mantissa > NIX_TM_MAX_RATE_MANTISSA)
167 *div_exp_p = div_exp;
169 *exponent_p = exponent;
171 *mantissa_p = mantissa;
173 /* Calculate real rate value */
174 return NIX_TM_SHAPER_RATE(exponent, mantissa, div_exp);
/* Convert a shaper burst size into the hardware (exponent, mantissa)
 * encoding: value ~= ((256 + mantissa) << (exponent + 1)) / 256.
 * Starts from the maxima and walks each down until the encoded value
 * fits; writes results through the out pointers and returns the
 * achievable burst via NIX_TM_SHAPER_BURST(). Out-of-range input
 * returns early (return value elided — presumably 0).
 * NOTE(review): the decrement statements of both while loops and the
 * early returns are elided in this listing.
 */
178 nix_tm_shaper_burst_conv(uint64_t value, uint64_t *exponent_p,
179 uint64_t *mantissa_p)
181 uint64_t exponent, mantissa;
183 if (value < NIX_TM_MIN_SHAPER_BURST || value > NIX_TM_MAX_SHAPER_BURST)
186 /* Calculate burst exponent and mantissa using
187 * the following formula:
189 * value = (((256 + mantissa) << (exponent + 1)
193 exponent = NIX_TM_MAX_BURST_EXPONENT;
194 mantissa = NIX_TM_MAX_BURST_MANTISSA;
196 while (value < (1ull << (exponent + 1)))
199 while (value < ((256 + mantissa) << (exponent + 1)) / 256)
202 if (exponent > NIX_TM_MAX_BURST_EXPONENT ||
203 mantissa > NIX_TM_MAX_BURST_MANTISSA)
207 *exponent_p = exponent;
209 *mantissa_p = mantissa;
211 return NIX_TM_SHAPER_BURST(exponent, mantissa);
/* Fill CIR (committed) and PIR (peak) shaper data from a profile:
 * rate fields via nix_tm_shaper_rate_conv(), burst fields via
 * nix_tm_shaper_burst_conv(). Zero-valued profile fields are skipped,
 * leaving the corresponding cir/pir entries untouched (callers zero
 * them first — see shaper_reg_prep).
 * NOTE(review): a NULL-profile guard and the trailing div_exp
 * arguments of the rate_conv calls are elided in this listing.
 */
215 nix_tm_shaper_conf_get(struct nix_tm_shaper_profile *profile,
216 struct nix_tm_shaper_data *cir,
217 struct nix_tm_shaper_data *pir)
222 /* Calculate CIR exponent and mantissa */
223 if (profile->commit.rate)
224 cir->rate = nix_tm_shaper_rate_conv(
225 profile->commit.rate, &cir->exponent, &cir->mantissa,
228 /* Calculate PIR exponent and mantissa */
229 if (profile->peak.rate)
230 pir->rate = nix_tm_shaper_rate_conv(
231 profile->peak.rate, &pir->exponent, &pir->mantissa,
234 /* Calculate CIR burst exponent and mantissa */
235 if (profile->commit.size)
236 cir->burst = nix_tm_shaper_burst_conv(profile->commit.size,
237 &cir->burst_exponent,
238 &cir->burst_mantissa);
240 /* Calculate PIR burst exponent and mantissa */
241 if (profile->peak.size)
242 pir->burst = nix_tm_shaper_burst_conv(profile->peak.size,
243 &pir->burst_exponent,
244 &pir->burst_mantissa);
/* Inspect the children of parent_id in the given tree: count nodes at
 * each strict priority, identify the round-robin group (the priority
 * holding more than one child — written to *rr_prio, elided here) and
 * report the highest used priority via *max_prio (UINT32_MAX when
 * priorities are unused). Visible return appears to be the RR child
 * count; confirm against the full source.
 * NOTE(review): parent guards/continues, the rr_prio assignment, the
 * children++ accounting, and the single-RR-child demotion to SP are
 * all elided in this listing.
 */
248 nix_tm_check_rr(struct nix *nix, uint32_t parent_id, enum roc_nix_tm_tree tree,
249 uint32_t *rr_prio, uint32_t *max_prio)
251 uint32_t node_cnt[NIX_TM_TLX_SP_PRIO_MAX];
252 struct nix_tm_node_list *list;
253 struct nix_tm_node *node;
254 uint32_t rr_num = 0, i;
255 uint32_t children = 0;
258 memset(node_cnt, 0, sizeof(node_cnt));
260 *max_prio = UINT32_MAX;
262 list = nix_tm_node_list(nix, tree);
263 TAILQ_FOREACH(node, list, node) {
267 if (!(node->parent->id == parent_id))
270 priority = node->priority;
271 node_cnt[priority]++;
275 for (i = 0; i < NIX_TM_TLX_SP_PRIO_MAX; i++) {
279 if (node_cnt[i] > rr_num) {
281 rr_num = node_cnt[i];
285 /* RR group of single RR child is considered as SP */
291 /* Max prio will be returned only when we have non zero prio
292 * or if a parent has single child.
294 if (i > 1 || (children == 1))
/* Maximum strict priority supported at a hardware level.
 * Invalid levels, MDQ (no SP support), and TL1 without usable SP
 * (no TL1 access, or TL1_NO_SP flag set) return early — the elided
 * returns are presumably 0. Otherwise SP_PRIO_MAX - 1.
 */
300 nix_tm_max_prio(struct nix *nix, uint16_t hw_lvl)
302 if (hw_lvl >= NIX_TXSCH_LVL_CNT)
305 /* MDQ does not support SP */
306 if (hw_lvl == NIX_TXSCH_LVL_MDQ)
309 /* PF's TL1 with VF's enabled does not support SP */
310 if (hw_lvl == NIX_TXSCH_LVL_TL1 && (!nix_tm_have_tl1_access(nix) ||
311 (nix->tm_flags & NIX_TM_TL1_NO_SP)))
314 return NIX_TM_TLX_SP_PRIO_MAX - 1;
/* Validate a proposed (parent_id, priority) pair for a new node at
 * user level lvl: priority must not exceed the level's max, must not
 * duplicate a sibling's strict priority (one RR group excepted), must
 * not create more than one RR group, and must not leave priority
 * holes. Returns NIX_ERR_TM_* on violation; the success return (0,
 * presumably) is elided.
 * NOTE(review): sibling guards/continues, the multiple-RR-groups
 * detection feeding line 354, and the early return for an invalid
 * parent are elided in this listing.
 */
318 nix_tm_validate_prio(struct nix *nix, uint32_t lvl, uint32_t parent_id,
319 uint32_t priority, enum roc_nix_tm_tree tree)
321 uint8_t priorities[NIX_TM_TLX_SP_PRIO_MAX];
322 struct nix_tm_node_list *list;
323 struct nix_tm_node *node;
327 list = nix_tm_node_list(nix, tree);
328 /* Validate priority against max */
329 if (priority > nix_tm_max_prio(nix, nix_tm_lvl2nix(nix, lvl - 1)))
330 return NIX_ERR_TM_PRIO_EXCEEDED;
332 if (parent_id == ROC_NIX_TM_NODE_ID_INVALID)
335 memset(priorities, 0, sizeof(priorities));
336 priorities[priority] = 1;
338 TAILQ_FOREACH(node, list, node) {
342 if (node->parent->id != parent_id)
345 priorities[node->priority]++;
348 for (i = 0; i < NIX_TM_TLX_SP_PRIO_MAX; i++)
349 if (priorities[i] > 1)
352 /* At max, one rr groups per parent */
354 return NIX_ERR_TM_MULTIPLE_RR_GROUPS;
356 /* Check for previous priority to avoid holes in priorities */
357 if (priority && !priorities[priority - 1])
358 return NIX_ERR_TM_PRIO_ORDER;
/* Check that every child of 'parent' in 'list' has hardware resources
 * attached (NIX_TM_NODE_HWRES flag). Non-children are skipped; the
 * false-on-missing-resource and final true returns are elided in
 * this listing.
 */
364 nix_tm_child_res_valid(struct nix_tm_node_list *list,
365 struct nix_tm_node *parent)
367 struct nix_tm_node *child;
369 TAILQ_FOREACH(child, list, node) {
370 if (child->parent != parent)
372 if (!(child->flags & NIX_TM_NODE_HWRES))
/* Emit the default TL1 register writes for the main root scheduler
 * queue 'schq' into the reg/regval arrays: DWRR quantum, topology
 * (default RR priority), and CIR (value elided — presumably shaper
 * disabled). The register count 'k' and its return are elided in
 * this listing.
 */
379 nix_tm_tl1_default_prep(uint32_t schq, volatile uint64_t *reg,
380 volatile uint64_t *regval)
385 * Default config for TL1.
386 * For VF this is always ignored.
388 plt_tm_dbg("Default config for main root %s(%u)",
389 nix_tm_hwlvl2str(NIX_TXSCH_LVL_TL1), schq);
391 /* Set DWRR quantum */
392 reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
393 regval[k] = NIX_TM_TL1_DFLT_RR_QTM;
396 reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
397 regval[k] = (NIX_TM_TL1_DFLT_RR_PRIO << 1);
400 reg[k] = NIX_AF_TL1X_CIR(schq);
/* Prepare the topology/link register writes for one TM node into
 * reg/regval (+ regval_mask for read-modify-write fields). Per
 * hardware level this sets the PARENT register, the TOPOLOGY register
 * (prio anchor in bits 32+, rr_prio at bit 1), and for TL3/TL2 the
 * per-link LINKX_CFG (or SDP link for TL4). The switch head, k++
 * increments, schq/tree/link initialization, and the returned count
 * are elided in this listing — confirm against the full source.
 */
408 nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
409 volatile uint64_t *reg, volatile uint64_t *regval,
410 volatile uint64_t *regval_mask)
412 uint8_t k = 0, hw_lvl, parent_lvl;
413 uint64_t parent = 0, child = 0;
414 enum roc_nix_tm_tree tree;
415 uint32_t rr_prio, schq;
416 uint16_t link, relchan;
420 hw_lvl = node->hw_lvl;
421 parent_lvl = hw_lvl + 1;
422 rr_prio = node->rr_prio;
424 /* Root node will not have a parent node */
425 if (hw_lvl == nix->tm_root_lvl)
426 parent = node->parent_hw_id;
428 parent = node->parent->hw_id;
431 relchan = nix_tm_relchan_get(nix);
433 if (hw_lvl != NIX_TXSCH_LVL_SMQ)
434 child = nix_tm_find_prio_anchor(nix, node->id, tree);
436 /* Override default rr_prio when TL1
437 * Static Priority is disabled
439 if (hw_lvl == NIX_TXSCH_LVL_TL1 && nix->tm_flags & NIX_TM_TL1_NO_SP) {
440 rr_prio = NIX_TM_TL1_DFLT_RR_PRIO;
444 plt_tm_dbg("Topology config node %s(%u)->%s(%" PRIu64 ") lvl %u, id %u"
445 " prio_anchor %" PRIu64 " rr_prio %u (%p)",
446 nix_tm_hwlvl2str(hw_lvl), schq, nix_tm_hwlvl2str(parent_lvl),
447 parent, node->lvl, node->id, child, rr_prio, node);
449 /* Prepare Topology and Link config */
451 case NIX_TXSCH_LVL_SMQ:
453 /* Set xoff which will be cleared later */
454 reg[k] = NIX_AF_SMQX_CFG(schq);
455 regval[k] = (BIT_ULL(50) | NIX_MIN_HW_FRS |
456 ((nix->mtu & 0xFFFF) << 8));
458 ~(BIT_ULL(50) | GENMASK_ULL(6, 0) | GENMASK_ULL(23, 8));
461 /* Parent and schedule conf */
462 reg[k] = NIX_AF_MDQX_PARENT(schq);
463 regval[k] = parent << 16;
467 case NIX_TXSCH_LVL_TL4:
468 /* Parent and schedule conf */
469 reg[k] = NIX_AF_TL4X_PARENT(schq);
470 regval[k] = parent << 16;
473 reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
474 regval[k] = (child << 32) | (rr_prio << 1);
477 /* Configure TL4 to send to SDP channel instead of CGX/LBK */
479 reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
480 regval[k] = BIT_ULL(12);
484 case NIX_TXSCH_LVL_TL3:
485 /* Parent and schedule conf */
486 reg[k] = NIX_AF_TL3X_PARENT(schq);
487 regval[k] = parent << 16;
490 reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
491 regval[k] = (child << 32) | (rr_prio << 1);
494 /* Link configuration */
495 if (!nix->sdp_link &&
496 nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
497 reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
498 regval[k] = BIT_ULL(12) | relchan;
503 case NIX_TXSCH_LVL_TL2:
504 /* Parent and schedule conf */
505 reg[k] = NIX_AF_TL2X_PARENT(schq);
506 regval[k] = parent << 16;
509 reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
510 regval[k] = (child << 32) | (rr_prio << 1);
513 /* Link configuration */
514 if (!nix->sdp_link &&
515 nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
516 reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
517 regval[k] = BIT_ULL(12) | relchan;
522 case NIX_TXSCH_LVL_TL1:
523 reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
524 regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
/* Prepare the per-node SCHEDULE register write: strict priority in
 * bits 24+ plus the DWRR quantum derived from node->weight. TL1 takes
 * quantum only (no SP field written). When the root is effectively
 * TL2, or TL1 SP is disabled, TL2 children get the default RR prio.
 * NOTE(review): the switch head, k++ statements, rr_quantum
 * declaration, break statements and the returned count are elided in
 * this listing.
 */
534 nix_tm_sched_reg_prep(struct nix *nix, struct nix_tm_node *node,
535 volatile uint64_t *reg, volatile uint64_t *regval)
537 uint64_t strict_prio = node->priority;
538 uint32_t hw_lvl = node->hw_lvl;
539 uint32_t schq = node->hw_id;
543 rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
545 /* For children to root, strict prio is default if either
546 * device root is TL2 or TL1 Static Priority is disabled.
548 if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
549 (!nix_tm_have_tl1_access(nix) || nix->tm_flags & NIX_TM_TL1_NO_SP))
550 strict_prio = NIX_TM_TL1_DFLT_RR_PRIO;
552 plt_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
553 "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
554 nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
555 strict_prio, rr_quantum, node);
558 case NIX_TXSCH_LVL_SMQ:
559 reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
560 regval[k] = (strict_prio << 24) | rr_quantum;
564 case NIX_TXSCH_LVL_TL4:
565 reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
566 regval[k] = (strict_prio << 24) | rr_quantum;
570 case NIX_TXSCH_LVL_TL3:
571 reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
572 regval[k] = (strict_prio << 24) | rr_quantum;
576 case NIX_TXSCH_LVL_TL2:
577 reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
578 regval[k] = (strict_prio << 24) | rr_quantum;
582 case NIX_TXSCH_LVL_TL1:
583 reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
584 regval[k] = rr_quantum;
/* Prepare PIR/CIR shaper and SHAPE register writes for a node from its
 * shaper profile. cir/pir are zeroed then filled by
 * nix_tm_shaper_conf_get(); each PIR/CIR register gets the packed
 * value with the enable bit (| 1) only when both rate and burst are
 * non-zero — the else-branch value is elided (presumably 0, i.e.
 * shaper off). SHAPE carries length adjust, RED algorithm (bit 9+) and
 * pkt_mode (bit 24); TL1 has CIR only and no RED field.
 * NOTE(review): the switch "default", k++ statements, adjust
 * declaration/NULL-profile handling, and the returned count are
 * elided. The TL3 SHAPE line (678) sets pkt_mode without the << 24
 * shift seen at every other level — possibly a real inconsistency in
 * the original; verify against upstream before relying on it.
 */
594 nix_tm_shaper_reg_prep(struct nix_tm_node *node,
595 struct nix_tm_shaper_profile *profile,
596 volatile uint64_t *reg, volatile uint64_t *regval)
598 struct nix_tm_shaper_data cir, pir;
599 uint32_t schq = node->hw_id;
603 memset(&cir, 0, sizeof(cir));
604 memset(&pir, 0, sizeof(pir));
605 nix_tm_shaper_conf_get(profile, &cir, &pir);
610 adjust = profile->pkt_len_adj;
612 plt_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
613 "pir %" PRIu64 "(%" PRIu64 "B),"
614 " cir %" PRIu64 "(%" PRIu64 "B)"
615 "adjust 0x%" PRIx64 "(pktmode %u) (%p)",
616 nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
617 pir.rate, pir.burst, cir.rate, cir.burst, adjust,
618 node->pkt_mode, node);
620 switch (node->hw_lvl) {
621 case NIX_TXSCH_LVL_SMQ:
622 /* Configure PIR, CIR */
623 reg[k] = NIX_AF_MDQX_PIR(schq);
624 regval[k] = (pir.rate && pir.burst) ?
625 (nix_tm_shaper2regval(&pir) | 1) :
629 reg[k] = NIX_AF_MDQX_CIR(schq);
630 regval[k] = (cir.rate && cir.burst) ?
631 (nix_tm_shaper2regval(&cir) | 1) :
635 /* Configure RED ALG */
636 reg[k] = NIX_AF_MDQX_SHAPE(schq);
637 regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
638 (uint64_t)node->pkt_mode << 24);
641 case NIX_TXSCH_LVL_TL4:
642 /* Configure PIR, CIR */
643 reg[k] = NIX_AF_TL4X_PIR(schq);
644 regval[k] = (pir.rate && pir.burst) ?
645 (nix_tm_shaper2regval(&pir) | 1) :
649 reg[k] = NIX_AF_TL4X_CIR(schq);
650 regval[k] = (cir.rate && cir.burst) ?
651 (nix_tm_shaper2regval(&cir) | 1) :
655 /* Configure RED algo */
656 reg[k] = NIX_AF_TL4X_SHAPE(schq);
657 regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
658 (uint64_t)node->pkt_mode << 24);
661 case NIX_TXSCH_LVL_TL3:
662 /* Configure PIR, CIR */
663 reg[k] = NIX_AF_TL3X_PIR(schq);
664 regval[k] = (pir.rate && pir.burst) ?
665 (nix_tm_shaper2regval(&pir) | 1) :
669 reg[k] = NIX_AF_TL3X_CIR(schq);
670 regval[k] = (cir.rate && cir.burst) ?
671 (nix_tm_shaper2regval(&cir) | 1) :
675 /* Configure RED algo */
676 reg[k] = NIX_AF_TL3X_SHAPE(schq);
677 regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
678 (uint64_t)node->pkt_mode);
682 case NIX_TXSCH_LVL_TL2:
683 /* Configure PIR, CIR */
684 reg[k] = NIX_AF_TL2X_PIR(schq);
685 regval[k] = (pir.rate && pir.burst) ?
686 (nix_tm_shaper2regval(&pir) | 1) :
690 reg[k] = NIX_AF_TL2X_CIR(schq);
691 regval[k] = (cir.rate && cir.burst) ?
692 (nix_tm_shaper2regval(&cir) | 1) :
696 /* Configure RED algo */
697 reg[k] = NIX_AF_TL2X_SHAPE(schq);
698 regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
699 (uint64_t)node->pkt_mode << 24);
703 case NIX_TXSCH_LVL_TL1:
705 reg[k] = NIX_AF_TL1X_CIR(schq);
706 regval[k] = (cir.rate && cir.burst) ?
707 (nix_tm_shaper2regval(&cir) | 1) :
711 /* Configure length disable and adjust */
712 reg[k] = NIX_AF_TL1X_SHAPE(schq);
713 regval[k] = (adjust | (uint64_t)node->pkt_mode << 24);
/* Prepare the per-level SW_XOFF register write for a node. Only the
 * register addresses are visible here; the regval assignment (from
 * 'enable'), switch head, k++ and return count are elided in this
 * listing.
 */
722 nix_tm_sw_xoff_prep(struct nix_tm_node *node, bool enable,
723 volatile uint64_t *reg, volatile uint64_t *regval)
725 uint32_t hw_lvl = node->hw_lvl;
726 uint32_t schq = node->hw_id;
729 plt_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)",
730 nix_tm_hwlvl2str(hw_lvl), schq, node->lvl, node->id, enable,
736 case NIX_TXSCH_LVL_MDQ:
737 reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
740 case NIX_TXSCH_LVL_TL4:
741 reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
744 case NIX_TXSCH_LVL_TL3:
745 reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
748 case NIX_TXSCH_LVL_TL2:
749 reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
752 case NIX_TXSCH_LVL_TL1:
753 reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
763 /* Search for min rate in topology */
/* Walk all shaper profiles and return the smallest non-zero peak or
 * commit rate, starting from a 1 Gbps ceiling. The final return of
 * rate_min is elided in this listing.
 */
765 nix_tm_shaper_profile_rate_min(struct nix *nix)
767 struct nix_tm_shaper_profile *profile;
768 uint64_t rate_min = 1E9; /* 1 Gbps */
770 TAILQ_FOREACH(profile, &nix->shaper_profile_list, shaper) {
771 if (profile->peak.rate && profile->peak.rate < rate_min)
772 rate_min = profile->peak.rate;
774 if (profile->commit.rate && profile->commit.rate < rate_min)
775 rate_min = profile->commit.rate;
/* Count free scheduler queues at hw_lvl by popcounting bitmap slabs:
 * scans the contiguous or discontiguous bitmap until the scan wraps
 * back to the starting position. The 'count' declaration,
 * start_pos = pos capture, the do{ head and the final return are
 * elided in this listing.
 */
781 nix_tm_resource_avail(struct nix *nix, uint8_t hw_lvl, bool contig)
783 uint32_t pos = 0, start_pos = 0;
784 struct plt_bitmap *bmp;
788 bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
789 plt_bitmap_scan_init(bmp);
791 if (!plt_bitmap_scan(bmp, &pos, &slab))
797 count += __builtin_popcountll(slab);
798 if (!plt_bitmap_scan(bmp, &pos, &slab))
800 } while (pos != start_pos);
/* Estimate how many scheduler queues (discontiguous in schq[],
 * contiguous in schq_contig[]) must be newly allocated per hardware
 * level for the given tree: walk parents from TL1 down, account each
 * reallocating parent's RR children plus a contiguous run of
 * max_prio+1 SP slots (the spare schq at the rr-prio slot trims the
 * discontiguous need by one), add the root, then subtract what the
 * existing bitmaps already provide. Returns a total count
 * (declaration of the TL1-adjust path when TL1 access is absent, the
 * continue statements and final return are elided in this listing).
 */
806 nix_tm_resource_estimate(struct nix *nix, uint16_t *schq_contig, uint16_t *schq,
807 enum roc_nix_tm_tree tree)
809 struct nix_tm_node_list *list;
810 uint8_t contig_cnt, hw_lvl;
811 struct nix_tm_node *parent;
812 uint16_t cnt = 0, avail;
814 list = nix_tm_node_list(nix, tree);
815 /* Walk through parents from TL1..TL4 */
816 for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
817 TAILQ_FOREACH(parent, list, node) {
818 if (hw_lvl != parent->hw_lvl)
821 /* Skip accounting for children whose
822 * parent does not indicate so.
824 if (!parent->child_realloc)
827 /* Count children needed */
828 schq[hw_lvl - 1] += parent->rr_num;
829 if (parent->max_prio != UINT32_MAX) {
830 contig_cnt = parent->max_prio + 1;
831 schq_contig[hw_lvl - 1] += contig_cnt;
832 /* When we have SP + DWRR at a parent,
833 * we will always have a spare schq at rr prio
834 * location in contiguous queues. Hence reduce
835 * discontiguous count by 1.
837 if (parent->max_prio > 0 && parent->rr_num)
838 schq[hw_lvl - 1] -= 1;
843 schq[nix->tm_root_lvl] = 1;
844 if (!nix_tm_have_tl1_access(nix))
845 schq[NIX_TXSCH_LVL_TL1] = 1;
847 /* Now check for existing resources */
848 for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
849 avail = nix_tm_resource_avail(nix, hw_lvl, false);
850 if (schq[hw_lvl] <= avail)
853 schq[hw_lvl] -= avail;
855 /* For contiguous queues, realloc everything */
856 avail = nix_tm_resource_avail(nix, hw_lvl, true);
857 if (schq_contig[hw_lvl] <= avail)
858 schq_contig[hw_lvl] = 0;
861 cnt += schq_contig[hw_lvl];
863 plt_tm_dbg("Estimate resources needed for %s: dis %u cont %u",
864 nix_tm_hwlvl2str(hw_lvl), schq[hw_lvl],
865 schq_contig[hw_lvl]);
/* Public ROC API: look up a user-tree node by id and report its level.
 * NOTE(review): the success return (node->lvl, presumably) is elided
 * in this listing.
 */
872 roc_nix_tm_node_lvl(struct roc_nix *roc_nix, uint32_t node_id)
874 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
875 struct nix_tm_node *node;
877 node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
879 return NIX_ERR_TM_INVALID_NODE;
/* Public ROC API: fetch a user-tree node by id, cast to the public
 * handle type (NULL is passed through when not found).
 */
884 struct roc_nix_tm_node *
885 roc_nix_tm_node_get(struct roc_nix *roc_nix, uint32_t node_id)
887 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
888 struct nix_tm_node *node;
890 node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
891 return (struct roc_nix_tm_node *)node;
/* Public ROC API: iterate the user-tree node list. NULL prev yields
 * the list head; a prev from another tree is rejected (return value
 * elided — presumably NULL); otherwise the next list entry.
 */
894 struct roc_nix_tm_node *
895 roc_nix_tm_node_next(struct roc_nix *roc_nix, struct roc_nix_tm_node *__prev)
897 struct nix_tm_node *prev = (struct nix_tm_node *)__prev;
898 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
899 struct nix_tm_node_list *list;
901 list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
903 /* HEAD of the list */
905 return (struct roc_nix_tm_node *)TAILQ_FIRST(list);
908 if (prev->tree != ROC_NIX_TM_USER)
911 return (struct roc_nix_tm_node *)TAILQ_NEXT(prev, node);
/* Public ROC API: fetch a shaper profile by id, cast to the public
 * handle type (NULL is passed through when not found).
 */
914 struct roc_nix_tm_shaper_profile *
915 roc_nix_tm_shaper_profile_get(struct roc_nix *roc_nix, uint32_t profile_id)
917 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
918 struct nix_tm_shaper_profile *profile;
920 profile = nix_tm_shaper_profile_search(nix, profile_id);
921 return (struct roc_nix_tm_shaper_profile *)profile;
/* Public ROC API: iterate the shaper profile list. NULL prev yields
 * the list head; otherwise the next entry.
 */
924 struct roc_nix_tm_shaper_profile *
925 roc_nix_tm_shaper_profile_next(struct roc_nix *roc_nix,
926 struct roc_nix_tm_shaper_profile *__prev)
928 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
929 struct nix_tm_shaper_profile_list *list;
930 struct nix_tm_shaper_profile *prev;
932 prev = (struct nix_tm_shaper_profile *)__prev;
933 list = &nix->shaper_profile_list;
935 /* HEAD of the list */
937 return (struct roc_nix_tm_shaper_profile *)TAILQ_FIRST(list);
939 return (struct roc_nix_tm_shaper_profile *)TAILQ_NEXT(prev, shaper);
/* Allocate a zeroed TM node and record plt_free as its destructor so
 * nix_tm_node_free() can release it regardless of allocator.
 * NOTE(review): the NULL-check on plt_zmalloc and the return of the
 * node are elided in this listing.
 */
943 nix_tm_node_alloc(void)
945 struct nix_tm_node *node;
947 node = plt_zmalloc(sizeof(struct nix_tm_node), 0);
951 node->free_fn = plt_free;
/* Free a TM node via its recorded destructor; NULL node or missing
 * free_fn is a no-op (the early return body is elided here).
 */
956 nix_tm_node_free(struct nix_tm_node *node)
958 if (!node || node->free_fn == NULL)
961 (node->free_fn)(node);
/* Allocate a zeroed shaper profile and record plt_free as its
 * destructor, mirroring nix_tm_node_alloc().
 * NOTE(review): the NULL-check and the return of the profile are
 * elided in this listing.
 */
964 struct nix_tm_shaper_profile *
965 nix_tm_shaper_profile_alloc(void)
967 struct nix_tm_shaper_profile *profile;
969 profile = plt_zmalloc(sizeof(struct nix_tm_shaper_profile), 0);
973 profile->free_fn = plt_free;
/* Free a shaper profile via its recorded destructor; NULL profile or
 * missing free_fn is a no-op (the early return body is elided here).
 */
978 nix_tm_shaper_profile_free(struct nix_tm_shaper_profile *profile)
980 if (!profile || !profile->free_fn)
983 (profile->free_fn)(profile);