/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

static inline uint64_t
nix_tm_shaper2regval(struct nix_tm_shaper_data *shaper)
{
	uint64_t regval;

	if (roc_model_is_cn9k()) {
		regval = (shaper->burst_exponent << 37);
		regval |= (shaper->burst_mantissa << 29);
		regval |= (shaper->div_exp << 13);
		regval |= (shaper->exponent << 9);
		regval |= (shaper->mantissa << 1);
		return regval;
	}

	regval = (shaper->burst_exponent << 44);
	regval |= (shaper->burst_mantissa << 29);
	regval |= (shaper->div_exp << 13);
	regval |= (shaper->exponent << 9);
	regval |= (shaper->mantissa << 1);
	return regval;
}

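/*
 * Layout note (inferred from the shifts above, not from a register spec):
 * callers in nix_tm_shaper_reg_prep() OR in bit 0 as the shaper enable, so
 * the packed word is roughly:
 *
 *	bit  0		enable (set by the caller)
 *	bits 8:1	rate mantissa
 *	bits 12:9	rate exponent
 *	bits 16:13	rate divider exponent
 *	bits 36:29	burst mantissa (cn9k; wider on later models)
 *	bits 40:37	burst exponent (cn9k; 47:44 otherwise)
 */
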
uint16_t
nix_tm_lvl2nix_tl1_root(uint32_t lvl)
{
	switch (lvl) {
	case ROC_TM_LVL_ROOT:
		return NIX_TXSCH_LVL_TL1;
	case ROC_TM_LVL_SCH1:
		return NIX_TXSCH_LVL_TL2;
	case ROC_TM_LVL_SCH2:
		return NIX_TXSCH_LVL_TL3;
	case ROC_TM_LVL_SCH3:
		return NIX_TXSCH_LVL_TL4;
	case ROC_TM_LVL_SCH4:
		return NIX_TXSCH_LVL_SMQ;
	default:
		return NIX_TXSCH_LVL_CNT;
	}
}

uint16_t
nix_tm_lvl2nix_tl2_root(uint32_t lvl)
{
	switch (lvl) {
	case ROC_TM_LVL_ROOT:
		return NIX_TXSCH_LVL_TL2;
	case ROC_TM_LVL_SCH1:
		return NIX_TXSCH_LVL_TL3;
	case ROC_TM_LVL_SCH2:
		return NIX_TXSCH_LVL_TL4;
	case ROC_TM_LVL_SCH3:
		return NIX_TXSCH_LVL_SMQ;
	default:
		return NIX_TXSCH_LVL_CNT;
	}
}

uint16_t
nix_tm_lvl2nix(struct nix *nix, uint32_t lvl)
{
	if (nix_tm_have_tl1_access(nix))
		return nix_tm_lvl2nix_tl1_root(lvl);
	else
		return nix_tm_lvl2nix_tl2_root(lvl);
}

static uint8_t
nix_tm_relchan_get(struct nix *nix)
{
	return nix->tx_chan_base & 0xff;
}

static int
nix_tm_find_prio_anchor(struct nix *nix, uint32_t node_id,
			enum roc_nix_tm_tree tree)
{
	struct nix_tm_node *child_node;
	struct nix_tm_node_list *list;

	list = nix_tm_node_list(nix, tree);

	TAILQ_FOREACH(child_node, list, node) {
		if (!child_node->parent)
			continue;
		if (!(child_node->parent->id == node_id))
			continue;
		/* Skip RR group children; they do not anchor SP priorities */
		if (child_node->priority == child_node->parent->rr_prio)
			continue;
		return child_node->hw_id - child_node->priority;
	}

	return 0;
}

struct nix_tm_shaper_profile *
nix_tm_shaper_profile_search(struct nix *nix, uint32_t id)
{
	struct nix_tm_shaper_profile *profile;

	TAILQ_FOREACH(profile, &nix->shaper_profile_list, shaper) {
		if (profile->id == id)
			return profile;
	}
	return NULL;
}

struct nix_tm_node *
nix_tm_node_search(struct nix *nix, uint32_t node_id, enum roc_nix_tm_tree tree)
{
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;

	list = nix_tm_node_list(nix, tree);
	TAILQ_FOREACH(node, list, node) {
		if (node->id == node_id)
			return node;
	}
	return NULL;
}

uint64_t
nix_tm_shaper_rate_conv(uint64_t value, uint64_t *exponent_p,
			uint64_t *mantissa_p, uint64_t *div_exp_p)
{
	uint64_t div_exp, exponent, mantissa;

	/* Boundary checks */
	if (value < NIX_TM_MIN_SHAPER_RATE || value > NIX_TM_MAX_SHAPER_RATE)
		return 0;

	if (value <= NIX_TM_SHAPER_RATE(0, 0, 0)) {
		/* Calculate rate div_exp and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * (256 + mantissa)
		 *		/ ((1 << div_exp) * 256))
		 */
		div_exp = 0;
		exponent = 0;
		mantissa = NIX_TM_MAX_RATE_MANTISSA;

		while (value < (NIX_TM_SHAPER_RATE_CONST / (1 << div_exp)))
			div_exp += 1;

		while (value < ((NIX_TM_SHAPER_RATE_CONST * (256 + mantissa)) /
				((1 << div_exp) * 256)))
			mantissa -= 1;
	} else {
		/* Calculate rate exponent and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
		 */
		div_exp = 0;
		exponent = NIX_TM_MAX_RATE_EXPONENT;
		mantissa = NIX_TM_MAX_RATE_MANTISSA;

		while (value < (NIX_TM_SHAPER_RATE_CONST * (1 << exponent)))
			exponent -= 1;

		while (value < ((NIX_TM_SHAPER_RATE_CONST *
				 ((256 + mantissa) << exponent)) /
				256))
			mantissa -= 1;
	}

	if (div_exp > NIX_TM_MAX_RATE_DIV_EXP ||
	    exponent > NIX_TM_MAX_RATE_EXPONENT ||
	    mantissa > NIX_TM_MAX_RATE_MANTISSA)
		return 0;

	if (div_exp_p)
		*div_exp_p = div_exp;
	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	/* Calculate real rate value */
	return NIX_TM_SHAPER_RATE(exponent, mantissa, div_exp);
}

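/*
 * Worked example (illustrative only, taking NIX_TM_SHAPER_RATE_CONST as the
 * 2E6 constant in the formulas above): a request of 100 Mbps (value = 1E8)
 * takes the exponent/mantissa branch and converges to exponent = 5,
 * mantissa = 144, div_exp = 0, since
 *
 *	2E6 * ((256 + 144) << 5) / 256 = 2E6 * 50 = 1E8
 *
 * so the returned "real" rate matches the request exactly. A hypothetical
 * caller:
 *
 *	uint64_t exp, mant, div;
 *	uint64_t real;
 *
 *	real = nix_tm_shaper_rate_conv(100000000ULL, &exp, &mant, &div);
 *	if (!real)
 *		return -EINVAL;	(rate outside the representable range)
 */
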
uint64_t
nix_tm_shaper_burst_conv(uint64_t value, uint64_t *exponent_p,
			 uint64_t *mantissa_p)
{
	uint64_t min_burst, max_burst;
	uint64_t exponent, mantissa;
	uint32_t max_mantissa;

	min_burst = NIX_TM_MIN_SHAPER_BURST;
	max_burst = roc_nix_tm_max_shaper_burst_get();

	if (value < min_burst || value > max_burst)
		return 0;

	max_mantissa = (roc_model_is_cn9k() ? NIX_CN9K_TM_MAX_BURST_MANTISSA :
					      NIX_TM_MAX_BURST_MANTISSA);
	/* Calculate burst exponent and mantissa using
	 * the following formula:
	 *
	 * value = (((256 + mantissa) << (exponent + 1)) / 256)
	 */
	exponent = NIX_TM_MAX_BURST_EXPONENT;
	mantissa = max_mantissa;

	while (value < (1ull << (exponent + 1)))
		exponent -= 1;

	while (value < ((256 + mantissa) << (exponent + 1)) / 256)
		mantissa -= 1;

	if (exponent > NIX_TM_MAX_BURST_EXPONENT || mantissa > max_mantissa)
		return 0;

	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	return NIX_TM_SHAPER_BURST(exponent, mantissa);
}

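/*
 * Worked example (illustrative only): a burst request of 4096B resolves to
 * exponent = 11, mantissa = 0, since per the formula above
 *
 *	((256 + 0) << (11 + 1)) / 256 = 1 << 12 = 4096
 *
 * so NIX_TM_SHAPER_BURST(11, 0) reproduces the requested burst exactly.
 */
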
static void
nix_tm_shaper_conf_get(struct nix_tm_shaper_profile *profile,
		       struct nix_tm_shaper_data *cir,
		       struct nix_tm_shaper_data *pir)
{
	memset(cir, 0, sizeof(*cir));
	memset(pir, 0, sizeof(*pir));

	if (!profile)
		return;

	/* Calculate CIR exponent and mantissa */
	if (profile->commit.rate)
		cir->rate = nix_tm_shaper_rate_conv(
			profile->commit.rate, &cir->exponent, &cir->mantissa,
			&cir->div_exp);

	/* Calculate PIR exponent and mantissa */
	if (profile->peak.rate)
		pir->rate = nix_tm_shaper_rate_conv(
			profile->peak.rate, &pir->exponent, &pir->mantissa,
			&pir->div_exp);

	/* Calculate CIR burst exponent and mantissa */
	if (profile->commit.size)
		cir->burst = nix_tm_shaper_burst_conv(profile->commit.size,
						      &cir->burst_exponent,
						      &cir->burst_mantissa);

	/* Calculate PIR burst exponent and mantissa */
	if (profile->peak.size)
		pir->burst = nix_tm_shaper_burst_conv(profile->peak.size,
						      &pir->burst_exponent,
						      &pir->burst_mantissa);
}

uint32_t
nix_tm_check_rr(struct nix *nix, uint32_t parent_id, enum roc_nix_tm_tree tree,
		uint32_t *rr_prio, uint32_t *max_prio)
{
	uint32_t node_cnt[NIX_TM_TLX_SP_PRIO_MAX];
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	uint32_t rr_num = 0, i;
	uint32_t children = 0;
	uint32_t priority;

	memset(node_cnt, 0, sizeof(node_cnt));
	*rr_prio = 0xF;
	*max_prio = UINT32_MAX;

	list = nix_tm_node_list(nix, tree);
	TAILQ_FOREACH(node, list, node) {
		if (!node->parent)
			continue;

		if (!(node->parent->id == parent_id))
			continue;

		priority = node->priority;
		node_cnt[priority]++;
		children++;
	}

	for (i = 0; i < NIX_TM_TLX_SP_PRIO_MAX; i++) {
		if (!node_cnt[i])
			break;

		if (node_cnt[i] > rr_num) {
			*rr_prio = i;
			rr_num = node_cnt[i];
		}
	}

	/* An RR group with a single child is treated as SP */
	if (rr_num == 1) {
		*rr_prio = 0xF;
		rr_num = 0;
	}

	/* Max prio is returned only when there is a non-zero prio
	 * or when the parent has a single child.
	 */
	if (i > 1 || (children == 1))
		*max_prio = i - 1;

	return rr_num;
}

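/*
 * Example (illustrative only): children at priorities {0, 0, 1} give
 * node_cnt = {2, 1, 0, ...}. The scan picks the RR group at prio 0
 * (rr_num = 2), stops at i = 2, and *max_prio becomes 1. With children at
 * {0, 1} instead, every group has a single child, so rr_num collapses to 0
 * and both priorities are treated as SP.
 */
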
int
nix_tm_max_prio(struct nix *nix, uint16_t hw_lvl)
{
	if (hw_lvl >= NIX_TXSCH_LVL_CNT)
		return -1;

	/* MDQ does not support SP */
	if (hw_lvl == NIX_TXSCH_LVL_MDQ)
		return 0;

	/* A PF's TL1 with VFs enabled does not support SP */
	if (hw_lvl == NIX_TXSCH_LVL_TL1 && (!nix_tm_have_tl1_access(nix) ||
					    (nix->tm_flags & NIX_TM_TL1_NO_SP)))
		return 0;

	return NIX_TM_TLX_SP_PRIO_MAX - 1;
}

int
nix_tm_validate_prio(struct nix *nix, uint32_t lvl, uint32_t parent_id,
		     uint32_t priority, enum roc_nix_tm_tree tree)
{
	uint8_t priorities[NIX_TM_TLX_SP_PRIO_MAX];
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	uint32_t rr_num = 0;
	int i;

	list = nix_tm_node_list(nix, tree);
	/* Validate priority against max */
	if (priority > nix_tm_max_prio(nix, nix_tm_lvl2nix(nix, lvl - 1)))
		return NIX_ERR_TM_PRIO_EXCEEDED;

	if (parent_id == ROC_NIX_TM_NODE_ID_INVALID)
		return 0;

	memset(priorities, 0, sizeof(priorities));
	priorities[priority] = 1;

	TAILQ_FOREACH(node, list, node) {
		if (!node->parent)
			continue;

		if (node->parent->id != parent_id)
			continue;

		priorities[node->priority]++;
	}

	for (i = 0; i < NIX_TM_TLX_SP_PRIO_MAX; i++)
		if (priorities[i] > 1)
			rr_num++;

	/* At most one RR group per parent */
	if (rr_num > 1)
		return NIX_ERR_TM_MULTIPLE_RR_GROUPS;

	/* Check for previous priority to avoid holes in priorities */
	if (priority && !priorities[priority - 1])
		return NIX_ERR_TM_PRIO_ORDER;

	return 0;
}

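/*
 * Example (illustrative only): adding a node at priority 2 under a parent
 * whose existing children sit only at priority 0 is rejected with
 * NIX_ERR_TM_PRIO_ORDER, because priorities[1] stays 0 and the hole check
 * above fires. Priorities must be used contiguously starting from 0.
 */
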
bool
nix_tm_child_res_valid(struct nix_tm_node_list *list,
		       struct nix_tm_node *parent)
{
	struct nix_tm_node *child;

	TAILQ_FOREACH(child, list, node) {
		if (child->parent != parent)
			continue;
		if (!(child->flags & NIX_TM_NODE_HWRES))
			return false;
	}
	return true;
}

uint8_t
nix_tm_tl1_default_prep(uint32_t schq, volatile uint64_t *reg,
			volatile uint64_t *regval)
{
	uint8_t k = 0;

	/*
	 * Default config for TL1.
	 * For VF this is always ignored.
	 */
	plt_tm_dbg("Default config for main root %s(%u)",
		   nix_tm_hwlvl2str(NIX_TXSCH_LVL_TL1), schq);

	/* Set DWRR quantum */
	reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
	regval[k] = NIX_TM_TL1_DFLT_RR_QTM;
	k++;

	reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
	regval[k] = (NIX_TM_TL1_DFLT_RR_PRIO << 1);
	k++;

	reg[k] = NIX_AF_TL1X_CIR(schq);
	regval[k] = 0;
	k++;

	return k;
}

uint8_t
nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
			 volatile uint64_t *reg, volatile uint64_t *regval,
			 volatile uint64_t *regval_mask)
{
	struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
	uint8_t k = 0, hw_lvl, parent_lvl;
	uint64_t parent = 0, child = 0;
	enum roc_nix_tm_tree tree;
	uint32_t rr_prio, schq;
	uint16_t link, relchan;

	tree = node->tree;
	schq = node->hw_id;
	hw_lvl = node->hw_lvl;
	parent_lvl = hw_lvl + 1;
	rr_prio = node->rr_prio;

	/* Root node will not have a parent node */
	if (hw_lvl == nix->tm_root_lvl)
		parent = node->parent_hw_id;
	else
		parent = node->parent->hw_id;

	link = nix->tx_link;
	relchan = nix_tm_relchan_get(nix);

	if (hw_lvl != NIX_TXSCH_LVL_SMQ)
		child = nix_tm_find_prio_anchor(nix, node->id, tree);

	/* Override default rr_prio when TL1
	 * Static Priority is disabled
	 */
	if (hw_lvl == NIX_TXSCH_LVL_TL1 && nix->tm_flags & NIX_TM_TL1_NO_SP) {
		rr_prio = NIX_TM_TL1_DFLT_RR_PRIO;
		child = 0;
	}

	plt_tm_dbg("Topology config node %s(%u)->%s(%" PRIu64 ") lvl %u, id %u"
		   " prio_anchor %" PRIu64 " rr_prio %u (%p)",
		   nix_tm_hwlvl2str(hw_lvl), schq, nix_tm_hwlvl2str(parent_lvl),
		   parent, node->lvl, node->id, child, rr_prio, node);

	/* Prepare Topology and Link config */
	switch (hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:

		/* Set xoff which will be cleared later */
		reg[k] = NIX_AF_SMQX_CFG(schq);
		regval[k] = (BIT_ULL(50) | NIX_MIN_HW_FRS |
			     ((nix->mtu & 0xFFFF) << 8));
		/* Maximum Vtag insertion size as a multiple of four bytes */
		if (roc_nix->hw_vlan_ins)
			regval[k] |= (0x2ULL << 36);
		regval_mask[k] = ~(BIT_ULL(50) | GENMASK_ULL(6, 0) |
				   GENMASK_ULL(23, 8) | GENMASK_ULL(38, 36));
		k++;

		/* Parent and schedule conf */
		reg[k] = NIX_AF_MDQX_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		break;
	case NIX_TXSCH_LVL_TL4:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL4X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Configure TL4 to send to SDP channel instead of CGX/LBK */
		if (nix->sdp_link) {
			reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
			regval[k] = BIT_ULL(12);
			k++;
		}
		break;
	case NIX_TXSCH_LVL_TL3:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL3X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Link configuration */
		if (!nix->sdp_link &&
		    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
			regval[k] = BIT_ULL(12) | relchan;
			k++;
		}

		break;
	case NIX_TXSCH_LVL_TL2:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL2X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Link configuration */
		if (!nix->sdp_link &&
		    nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
			regval[k] = BIT_ULL(12) | relchan;
			k++;
		}

		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
		k++;

		break;
	}

	return k;
}

uint8_t
nix_tm_sched_reg_prep(struct nix *nix, struct nix_tm_node *node,
		      volatile uint64_t *reg, volatile uint64_t *regval)
{
	uint64_t strict_prio = node->priority;
	uint32_t hw_lvl = node->hw_lvl;
	uint32_t schq = node->hw_id;
	uint64_t rr_quantum;
	uint8_t k = 0;

	/* For CN9K, weight needs to be converted to quantum */
	rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);

	/* For children of the root, strict prio is the default if either
	 * the device root is TL2 or TL1 Static Priority is disabled.
	 */
	if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
	    (!nix_tm_have_tl1_access(nix) || nix->tm_flags & NIX_TM_TL1_NO_SP))
		strict_prio = NIX_TM_TL1_DFLT_RR_PRIO;

	plt_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
		   "prio 0x%" PRIx64 ", rr_quantum/rr_wt 0x%" PRIx64 " (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
		   strict_prio, rr_quantum, node);

	switch (hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL4:
		reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL3:
		reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL2:
		reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;

		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
		regval[k] = rr_quantum;
		k++;

		break;
	}

	return k;
}

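/*
 * Note (inferred from the packing above): each *_SCHEDULE register takes the
 * DWRR quantum/weight in its low bits with the SP priority at bit 24. TL1 is
 * the root of the hierarchy, so only its default quantum is programmed and
 * no priority field is packed for it.
 */
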
uint8_t
nix_tm_shaper_reg_prep(struct nix_tm_node *node,
		       struct nix_tm_shaper_profile *profile,
		       volatile uint64_t *reg, volatile uint64_t *regval)
{
	struct nix_tm_shaper_data cir, pir;
	uint32_t schq = node->hw_id;
	uint64_t adjust = 0;
	uint8_t k = 0;

	nix_tm_shaper_conf_get(profile, &cir, &pir);

	if (profile && node->pkt_mode)
		adjust = profile->pkt_mode_adj;
	else if (profile)
		adjust = profile->pkt_len_adj;

	plt_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
		   "pir %" PRIu64 "(%" PRIu64 "B),"
		   " cir %" PRIu64 "(%" PRIu64 "B)"
		   " adjust 0x%" PRIx64 "(pktmode %u) (%p)",
		   nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
		   pir.rate, pir.burst, cir.rate, cir.burst, adjust,
		   node->pkt_mode, node);

	switch (node->hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_MDQX_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				    (nix_tm_shaper2regval(&pir) | 1) :
				    0;
		k++;

		reg[k] = NIX_AF_MDQX_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) :
				    0;
		k++;

		/* Configure RED ALG */
		reg[k] = NIX_AF_MDQX_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL4X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				    (nix_tm_shaper2regval(&pir) | 1) :
				    0;
		k++;

		reg[k] = NIX_AF_TL4X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) :
				    0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL4X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;
		break;
	case NIX_TXSCH_LVL_TL3:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL3X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				    (nix_tm_shaper2regval(&pir) | 1) :
				    0;
		k++;

		reg[k] = NIX_AF_TL3X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) :
				    0;
		k++;

		/* Configure RED algo; pkt_mode sits at bit 24 as in the
		 * other levels (the missing shift here was a bug).
		 */
		reg[k] = NIX_AF_TL3X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;

		break;
	case NIX_TXSCH_LVL_TL2:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL2X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				    (nix_tm_shaper2regval(&pir) | 1) :
				    0;
		k++;

		reg[k] = NIX_AF_TL2X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) :
				    0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL2X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
			     (uint64_t)node->pkt_mode << 24);
		k++;

		break;
	case NIX_TXSCH_LVL_TL1:
		/* Configure CIR */
		reg[k] = NIX_AF_TL1X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				    (nix_tm_shaper2regval(&cir) | 1) :
				    0;
		k++;

		/* Configure length disable and adjust */
		reg[k] = NIX_AF_TL1X_SHAPE(schq);
		regval[k] = (adjust | (uint64_t)node->pkt_mode << 24);
		k++;
		break;
	}

	return k;
}

uint8_t
nix_tm_sw_xoff_prep(struct nix_tm_node *node, bool enable,
		    volatile uint64_t *reg, volatile uint64_t *regval)
{
	uint32_t hw_lvl = node->hw_lvl;
	uint32_t schq = node->hw_id;
	uint8_t k = 0;

	plt_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)",
		   nix_tm_hwlvl2str(hw_lvl), schq, node->lvl, node->id, enable,
		   node);

	regval[k] = enable;

	switch (hw_lvl) {
	case NIX_TXSCH_LVL_MDQ:
		reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL3:
		reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL2:
		reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
		k++;
		break;
	default:
		break;
	}

	return k;
}

/* Search for min rate in topology */
uint64_t
nix_tm_shaper_profile_rate_min(struct nix *nix)
{
	struct nix_tm_shaper_profile *profile;
	uint64_t rate_min = 1E9; /* 1 Gbps */

	TAILQ_FOREACH(profile, &nix->shaper_profile_list, shaper) {
		if (profile->peak.rate && profile->peak.rate < rate_min)
			rate_min = profile->peak.rate;

		if (profile->commit.rate && profile->commit.rate < rate_min)
			rate_min = profile->commit.rate;
	}
	return rate_min;
}

uint16_t
nix_tm_resource_avail(struct nix *nix, uint8_t hw_lvl, bool contig)
{
	uint32_t pos = 0, start_pos = 0;
	struct plt_bitmap *bmp;
	uint16_t count = 0;
	uint64_t slab = 0;

	bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
	plt_bitmap_scan_init(bmp);

	if (!plt_bitmap_scan(bmp, &pos, &slab))
		return count;

	/* Count the set bits */
	start_pos = pos;
	do {
		count += __builtin_popcountll(slab);
		if (!plt_bitmap_scan(bmp, &pos, &slab))
			break;
	} while (pos != start_pos);

	return count;
}

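/*
 * plt_bitmap_scan() hands back 64-bit slabs and wraps around to the first
 * set bit, so the walk above terminates once pos returns to start_pos;
 * count then holds the number of free schqs in the bitmap.
 */
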
uint16_t
nix_tm_resource_estimate(struct nix *nix, uint16_t *schq_contig, uint16_t *schq,
			 enum roc_nix_tm_tree tree)
{
	struct nix_tm_node_list *list;
	uint8_t contig_cnt, hw_lvl;
	struct nix_tm_node *parent;
	uint16_t cnt = 0, avail;

	list = nix_tm_node_list(nix, tree);
	/* Walk through parents from TL1..TL4 */
	for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
		TAILQ_FOREACH(parent, list, node) {
			if (hw_lvl != parent->hw_lvl)
				continue;

			/* Skip accounting for children whose
			 * parent does not request reallocation.
			 */
			if (!parent->child_realloc)
				continue;

			/* Count children needed */
			schq[hw_lvl - 1] += parent->rr_num;
			if (parent->max_prio != UINT32_MAX) {
				contig_cnt = parent->max_prio + 1;
				schq_contig[hw_lvl - 1] += contig_cnt;
				/* When we have SP + DWRR at a parent,
				 * we will always have a spare schq at rr prio
				 * location in contiguous queues. Hence reduce
				 * discontiguous count by 1.
				 */
				if (parent->max_prio > 0 && parent->rr_num)
					schq[hw_lvl - 1] -= 1;
			}
		}
	}

	schq[nix->tm_root_lvl] = 1;
	if (!nix_tm_have_tl1_access(nix))
		schq[NIX_TXSCH_LVL_TL1] = 1;

	/* Now check for existing resources */
	for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
		avail = nix_tm_resource_avail(nix, hw_lvl, false);
		if (schq[hw_lvl] <= avail)
			schq[hw_lvl] = 0;
		else
			schq[hw_lvl] -= avail;

		/* For contiguous queues, realloc everything */
		avail = nix_tm_resource_avail(nix, hw_lvl, true);
		if (schq_contig[hw_lvl] <= avail)
			schq_contig[hw_lvl] = 0;

		cnt += schq[hw_lvl];
		cnt += schq_contig[hw_lvl];

		plt_tm_dbg("Estimate resources needed for %s: dis %u cont %u",
			   nix_tm_hwlvl2str(hw_lvl), schq[hw_lvl],
			   schq_contig[hw_lvl]);
	}

	return cnt;
}

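/*
 * Worked example (illustrative only): a parent with two SP children at
 * priorities 0..1 plus a four-child RR group at prio 2 reports rr_num = 4
 * and max_prio = 2. The accounting above then asks for max_prio + 1 = 3
 * contiguous schqs (prios 0..2) and rr_num - 1 = 3 discontiguous ones,
 * since one RR child reuses the contiguous slot at the rr prio.
 */
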
uint16_t
roc_nix_tm_leaf_cnt(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node_list *list;
	struct nix_tm_node *node;
	uint16_t leaf_cnt = 0;

	/* Count leaves only in the user list */
	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
	TAILQ_FOREACH(node, list, node) {
		if (node->id < nix->nb_tx_queues)
			leaf_cnt++;
	}

	return leaf_cnt;
}

int
roc_nix_tm_node_lvl(struct roc_nix *roc_nix, uint32_t node_id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node *node;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	return node->lvl;
}

struct roc_nix_tm_node *
roc_nix_tm_node_get(struct roc_nix *roc_nix, uint32_t node_id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node *node;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	return (struct roc_nix_tm_node *)node;
}

struct roc_nix_tm_node *
roc_nix_tm_node_next(struct roc_nix *roc_nix, struct roc_nix_tm_node *__prev)
{
	struct nix_tm_node *prev = (struct nix_tm_node *)__prev;
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_node_list *list;

	list = nix_tm_node_list(nix, ROC_NIX_TM_USER);

	/* HEAD of the list */
	if (!prev)
		return (struct roc_nix_tm_node *)TAILQ_FIRST(list);

	/* prev must belong to the user list to have a successor here */
	if (prev->tree != ROC_NIX_TM_USER)
		return NULL;

	return (struct roc_nix_tm_node *)TAILQ_NEXT(prev, node);
}

struct roc_nix_tm_shaper_profile *
roc_nix_tm_shaper_profile_get(struct roc_nix *roc_nix, uint32_t profile_id)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile *profile;

	profile = nix_tm_shaper_profile_search(nix, profile_id);
	return (struct roc_nix_tm_shaper_profile *)profile;
}

struct roc_nix_tm_shaper_profile *
roc_nix_tm_shaper_profile_next(struct roc_nix *roc_nix,
			       struct roc_nix_tm_shaper_profile *__prev)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile_list *list;
	struct nix_tm_shaper_profile *prev;

	prev = (struct nix_tm_shaper_profile *)__prev;
	list = &nix->shaper_profile_list;

	/* HEAD of the list */
	if (!prev)
		return (struct roc_nix_tm_shaper_profile *)TAILQ_FIRST(list);

	return (struct roc_nix_tm_shaper_profile *)TAILQ_NEXT(prev, shaper);
}

struct nix_tm_node *
nix_tm_node_alloc(void)
{
	struct nix_tm_node *node;

	node = plt_zmalloc(sizeof(struct nix_tm_node), 0);
	if (!node)
		return NULL;

	node->free_fn = plt_free;
	return node;
}

void
nix_tm_node_free(struct nix_tm_node *node)
{
	if (!node || node->free_fn == NULL)
		return;

	(node->free_fn)(node);
}

struct nix_tm_shaper_profile *
nix_tm_shaper_profile_alloc(void)
{
	struct nix_tm_shaper_profile *profile;

	profile = plt_zmalloc(sizeof(struct nix_tm_shaper_profile), 0);
	if (!profile)
		return NULL;

	profile->free_fn = plt_free;
	return profile;
}

void
nix_tm_shaper_profile_free(struct nix_tm_shaper_profile *profile)
{
	if (!profile || !profile->free_fn)
		return;

	(profile->free_fn)(profile);
}

int
roc_nix_tm_node_stats_get(struct roc_nix *roc_nix, uint32_t node_id, bool clear,
			  struct roc_nix_tm_node_stats *n_stats)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txschq_config *req, *rsp;
	struct nix_tm_node *node;
	uint32_t schq;
	int rc, i;

	node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
	if (!node)
		return NIX_ERR_TM_INVALID_NODE;

	if (node->hw_lvl != NIX_TXSCH_LVL_TL1)
		return NIX_ERR_OP_NOTSUP;

	/* Check if node has HW resource */
	if (!(node->flags & NIX_TM_NODE_HWRES))
		return 0;

	schq = node->hw_id;
	/* Skip fetch if not requested */
	if (!n_stats)
		goto clear_stats;

	memset(n_stats, 0, sizeof(struct roc_nix_tm_node_stats));

	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->read = 1;
	req->lvl = NIX_TXSCH_LVL_TL1;

	i = 0;
	req->reg[i++] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_DROPPED_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_BYTES(schq);
	req->num_regs = i;

	rc = mbox_process_msg(mbox, (void **)&rsp);
	if (rc)
		return rc;

	/* Return stats in the same order the registers were requested */
	n_stats->stats[ROC_NIX_TM_NODE_PKTS_DROPPED] = rsp->regval[0];
	n_stats->stats[ROC_NIX_TM_NODE_BYTES_DROPPED] = rsp->regval[1];
	n_stats->stats[ROC_NIX_TM_NODE_GREEN_PKTS] = rsp->regval[2];
	n_stats->stats[ROC_NIX_TM_NODE_GREEN_BYTES] = rsp->regval[3];
	n_stats->stats[ROC_NIX_TM_NODE_YELLOW_PKTS] = rsp->regval[4];
	n_stats->stats[ROC_NIX_TM_NODE_YELLOW_BYTES] = rsp->regval[5];
	n_stats->stats[ROC_NIX_TM_NODE_RED_PKTS] = rsp->regval[6];
	n_stats->stats[ROC_NIX_TM_NODE_RED_BYTES] = rsp->regval[7];

clear_stats:
	if (!clear)
		return 0;

	/* Clear all the stats by writing back zero regvals */
	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_TL1;
	i = 0;
	req->reg[i++] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_DROPPED_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_GREEN_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_YELLOW_BYTES(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_PACKETS(schq);
	req->reg[i++] = NIX_AF_TL1X_RED_BYTES(schq);
	req->num_regs = i;

	return mbox_process_msg(mbox, (void **)&rsp);
}

bool
roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if ((nix->tm_flags & NIX_TM_HIERARCHY_ENA) &&
	    (nix->tm_tree == ROC_NIX_TM_USER))
		return true;
	return false;
}

int
roc_nix_tm_tree_type_get(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return nix->tm_tree;
}

int
roc_nix_tm_max_prio(struct roc_nix *roc_nix, int lvl)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	int hw_lvl = nix_tm_lvl2nix(nix, lvl);

	return nix_tm_max_prio(nix, hw_lvl);
}

int
roc_nix_tm_lvl_is_leaf(struct roc_nix *roc_nix, int lvl)
{
	return nix_tm_is_leaf(roc_nix_to_nix_priv(roc_nix), lvl);
}

void
roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
				   struct roc_nix_tm_shaper_profile *roc_prof)
{
	struct nix_tm_node *tm_node = (struct nix_tm_node *)node;
	struct nix_tm_shaper_profile *profile;
	struct nix_tm_shaper_data cir, pir;

	/* Guard against a missing profile before touching its storage */
	if (!roc_prof)
		return;

	profile = (struct nix_tm_shaper_profile *)roc_prof->reserved;
	tm_node->red_algo = NIX_REDALG_STD;

	/* C0 doesn't support STALL when both PIR & CIR are enabled */
	if (profile && roc_model_is_cn96_cx()) {
		nix_tm_shaper_conf_get(profile, &cir, &pir);

		if (pir.rate && cir.rate)
			tm_node->red_algo = NIX_REDALG_DISCARD;
	}
}

int
roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix)
{
	if (nix_tm_have_tl1_access(roc_nix_to_nix_priv(roc_nix)))
		return NIX_TXSCH_LVL_CNT;

	return (NIX_TXSCH_LVL_CNT - 1);
}

int
roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (nix_tm_lvl2nix(nix, lvl) == NIX_TXSCH_LVL_TL1)
		return 1;

	return 0;
}