1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* Pack a shaper's burst exponent/mantissa and rate div_exp/exponent/mantissa
 * into a single NIX_AF_*_PIR/CIR register value. Field offsets (37/29/13/9/1)
 * come from the NIX shaper register layout.
 * NOTE(review): this chunk is missing lines (return type, braces) — body
 * reproduced verbatim from the visible lines only.
 */
9 nix_tm_shaper2regval(struct nix_tm_shaper_data *shaper)
11 return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
12 (shaper->div_exp << 13) | (shaper->exponent << 9) |
13 (shaper->mantissa << 1);
/* Map a user (ROC) TM level to a NIX hardware scheduler level when the
 * device root is TL1 (PF with TL1 access). Returns NIX_TXSCH_LVL_CNT for
 * levels beyond the hierarchy.
 * NOTE(review): the `if (lvl == ...)` conditions selecting each return are
 * not visible in this chunk — mapping order (TL1..SMQ) inferred from the
 * visible returns only.
 */
17 nix_tm_lvl2nix_tl1_root(uint32_t lvl)
21 return NIX_TXSCH_LVL_TL1;
23 return NIX_TXSCH_LVL_TL2;
25 return NIX_TXSCH_LVL_TL3;
27 return NIX_TXSCH_LVL_TL4;
29 return NIX_TXSCH_LVL_SMQ;
31 return NIX_TXSCH_LVL_CNT;
/* Map a user (ROC) TM level to a NIX hardware level when the device root is
 * TL2 (e.g. VF without TL1 access); the hierarchy starts one level lower
 * than the TL1-root variant. Returns NIX_TXSCH_LVL_CNT past the hierarchy.
 * NOTE(review): selecting conditions are missing from this chunk.
 */
36 nix_tm_lvl2nix_tl2_root(uint32_t lvl)
40 return NIX_TXSCH_LVL_TL2;
42 return NIX_TXSCH_LVL_TL3;
44 return NIX_TXSCH_LVL_TL4;
46 return NIX_TXSCH_LVL_SMQ;
48 return NIX_TXSCH_LVL_CNT;
/* Dispatch user-level -> HW-level translation based on whether this
 * PF/VF has TL1 access (root at TL1) or not (root at TL2). */
53 nix_tm_lvl2nix(struct nix *nix, uint32_t lvl)
55 if (nix_tm_have_tl1_access(nix))
56 return nix_tm_lvl2nix_tl1_root(lvl);
58 return nix_tm_lvl2nix_tl2_root(lvl);
/* Relative channel = low byte of the TX channel base; used for
 * TL3/TL2 link channel configuration. */
62 nix_tm_relchan_get(struct nix *nix)
64 return nix->tx_chan_base & 0xff;
/* Find the "prio anchor" HW id for a parent node: walk the tree's node
 * list, skip orphans, children of other parents, and the RR-priority
 * group, then derive the anchor as (child hw_id - child priority).
 * NOTE(review): the `continue` statements after each skip condition and
 * the no-child fallback return are not visible in this chunk.
 */
68 nix_tm_find_prio_anchor(struct nix *nix, uint32_t node_id,
69 enum roc_nix_tm_tree tree)
71 struct nix_tm_node *child_node;
72 struct nix_tm_node_list *list;
74 list = nix_tm_node_list(nix, tree);
76 TAILQ_FOREACH(child_node, list, node) {
77 if (!child_node->parent)
79 if (!(child_node->parent->id == node_id))
81 if (child_node->priority == child_node->parent->rr_prio)
83 return child_node->hw_id - child_node->priority;
/* Linear search of the shaper profile list by profile id.
 * NOTE(review): the `return profile;` / `return NULL;` lines are not
 * visible in this chunk — presumably returns the match or NULL.
 */
88 struct nix_tm_shaper_profile *
89 nix_tm_shaper_profile_search(struct nix *nix, uint32_t id)
91 struct nix_tm_shaper_profile *profile;
93 TAILQ_FOREACH(profile, &nix->shaper_profile_list, shaper) {
94 if (profile->id == id)
/* Linear search for a TM node by user node id within the given tree's
 * node list.
 * NOTE(review): the `return node;` / `return NULL;` lines are not visible
 * in this chunk.
 */
101 nix_tm_node_search(struct nix *nix, uint32_t node_id, enum roc_nix_tm_tree tree)
103 struct nix_tm_node_list *list;
104 struct nix_tm_node *node;
106 list = nix_tm_node_list(nix, tree);
107 TAILQ_FOREACH(node, list, node) {
108 if (node->id == node_id)
/* Convert a shaper rate (bytes/sec per the NIX_TM_SHAPER_RATE formula)
 * into the HW (exponent, mantissa, div_exp) triple. Rates at or below the
 * base rate use div_exp + mantissa; rates above use exponent + mantissa.
 * Returns the achievable rate NIX_TM_SHAPER_RATE(exponent, mantissa,
 * div_exp) and writes the triple through the out-pointers.
 * NOTE(review): loop bodies (the decrements of div_exp/mantissa/exponent),
 * initializations, the out-of-bounds failure return, and NULL checks on
 * the out-pointers are not visible in this chunk.
 */
115 nix_tm_shaper_rate_conv(uint64_t value, uint64_t *exponent_p,
116 uint64_t *mantissa_p, uint64_t *div_exp_p)
118 uint64_t div_exp, exponent, mantissa;
120 /* Boundary checks */
121 if (value < NIX_TM_MIN_SHAPER_RATE || value > NIX_TM_MAX_SHAPER_RATE)
124 if (value <= NIX_TM_SHAPER_RATE(0, 0, 0)) {
125 /* Calculate rate div_exp and mantissa using
126 * the following formula:
128 * value = (2E6 * (256 + mantissa)
129 * / ((1 << div_exp) * 256))
133 mantissa = NIX_TM_MAX_RATE_MANTISSA;
135 while (value < (NIX_TM_SHAPER_RATE_CONST / (1 << div_exp)))
138 while (value < ((NIX_TM_SHAPER_RATE_CONST * (256 + mantissa)) /
139 ((1 << div_exp) * 256)))
142 /* Calculate rate exponent and mantissa using
143 * the following formula:
145 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
149 exponent = NIX_TM_MAX_RATE_EXPONENT;
150 mantissa = NIX_TM_MAX_RATE_MANTISSA;
152 while (value < (NIX_TM_SHAPER_RATE_CONST * (1 << exponent)))
155 while (value < ((NIX_TM_SHAPER_RATE_CONST *
156 ((256 + mantissa) << exponent)) /
161 if (div_exp > NIX_TM_MAX_RATE_DIV_EXP ||
162 exponent > NIX_TM_MAX_RATE_EXPONENT ||
163 mantissa > NIX_TM_MAX_RATE_MANTISSA)
167 *div_exp_p = div_exp;
169 *exponent_p = exponent;
171 *mantissa_p = mantissa;
173 /* Calculate real rate value */
174 return NIX_TM_SHAPER_RATE(exponent, mantissa, div_exp);
/* Convert a shaper burst size into the HW (exponent, mantissa) pair by
 * walking down from the maxima until the formula
 *   value = ((256 + mantissa) << (exponent + 1)) / 256
 * fits. Returns the achievable burst NIX_TM_SHAPER_BURST(exponent,
 * mantissa) and writes the pair through the out-pointers.
 * NOTE(review): the loop-body decrements and failure/out-of-range return
 * lines are not visible in this chunk.
 */
178 nix_tm_shaper_burst_conv(uint64_t value, uint64_t *exponent_p,
179 uint64_t *mantissa_p)
181 uint64_t exponent, mantissa;
183 if (value < NIX_TM_MIN_SHAPER_BURST || value > NIX_TM_MAX_SHAPER_BURST)
186 /* Calculate burst exponent and mantissa using
187 * the following formula:
189 * value = (((256 + mantissa) << (exponent + 1)
193 exponent = NIX_TM_MAX_BURST_EXPONENT;
194 mantissa = NIX_TM_MAX_BURST_MANTISSA;
196 while (value < (1ull << (exponent + 1)))
199 while (value < ((256 + mantissa) << (exponent + 1)) / 256)
202 if (exponent > NIX_TM_MAX_BURST_EXPONENT ||
203 mantissa > NIX_TM_MAX_BURST_MANTISSA)
207 *exponent_p = exponent;
209 *mantissa_p = mantissa;
211 return NIX_TM_SHAPER_BURST(exponent, mantissa);
/* Fill CIR (committed) and PIR (peak) shaper HW data from a profile:
 * rate exponent/mantissa/div_exp and burst exponent/mantissa, computed
 * only for the fields that are non-zero in the profile. */
215 nix_tm_shaper_conf_get(struct nix_tm_shaper_profile *profile,
216 struct nix_tm_shaper_data *cir,
217 struct nix_tm_shaper_data *pir)
222 /* Calculate CIR exponent and mantissa */
223 if (profile->commit.rate)
224 cir->rate = nix_tm_shaper_rate_conv(
225 profile->commit.rate, &cir->exponent, &cir->mantissa,
228 /* Calculate PIR exponent and mantissa */
229 if (profile->peak.rate)
230 pir->rate = nix_tm_shaper_rate_conv(
231 profile->peak.rate, &pir->exponent, &pir->mantissa,
234 /* Calculate CIR burst exponent and mantissa */
235 if (profile->commit.size)
236 cir->burst = nix_tm_shaper_burst_conv(profile->commit.size,
237 &cir->burst_exponent,
238 &cir->burst_mantissa);
240 /* Calculate PIR burst exponent and mantissa */
241 if (profile->peak.size)
242 pir->burst = nix_tm_shaper_burst_conv(profile->peak.size,
243 &pir->burst_exponent,
244 &pir->burst_mantissa);
/* Scan children of `parent_id` in the given tree and determine the
 * round-robin group: counts nodes per priority, picks the priority with
 * the most members as the RR group (*rr_prio), and reports the highest
 * usable priority via *max_prio (UINT32_MAX when not applicable).
 * NOTE(review): several lines are missing from this chunk — the parent
 * NULL check, the `*rr_prio` assignment, the single-child-RR-as-SP
 * handling, and the return value — so the exact contract (return = rr
 * child count?) cannot be confirmed from here.
 */
248 nix_tm_check_rr(struct nix *nix, uint32_t parent_id, enum roc_nix_tm_tree tree,
249 uint32_t *rr_prio, uint32_t *max_prio)
251 uint32_t node_cnt[NIX_TM_TLX_SP_PRIO_MAX];
252 struct nix_tm_node_list *list;
253 struct nix_tm_node *node;
254 uint32_t rr_num = 0, i;
255 uint32_t children = 0;
258 memset(node_cnt, 0, sizeof(node_cnt));
260 *max_prio = UINT32_MAX;
262 list = nix_tm_node_list(nix, tree);
263 TAILQ_FOREACH(node, list, node) {
267 if (!(node->parent->id == parent_id))
270 priority = node->priority;
271 node_cnt[priority]++;
275 for (i = 0; i < NIX_TM_TLX_SP_PRIO_MAX; i++) {
279 if (node_cnt[i] > rr_num) {
281 rr_num = node_cnt[i];
285 /* RR group of single RR child is considered as SP */
291 /* Max prio will be returned only when we have non zero prio
292 * or if a parent has single child.
294 if (i > 1 || (children == 1))
/* Maximum static priority supported at a given HW level:
 * - invalid level: (return not visible in this chunk)
 * - MDQ: no SP support
 * - TL1 on a PF without TL1 access, or with TL1 SP disabled: no SP
 * - otherwise: NIX_TM_TLX_SP_PRIO_MAX - 1.
 * NOTE(review): the return values for the first three cases are on lines
 * missing from this chunk — presumably 0 / error.
 */
300 nix_tm_max_prio(struct nix *nix, uint16_t hw_lvl)
302 if (hw_lvl >= NIX_TXSCH_LVL_CNT)
305 /* MDQ does not support SP */
306 if (hw_lvl == NIX_TXSCH_LVL_MDQ)
309 /* PF's TL1 with VF's enabled does not support SP */
310 if (hw_lvl == NIX_TXSCH_LVL_TL1 && (!nix_tm_have_tl1_access(nix) ||
311 (nix->tm_flags & NIX_TM_TL1_NO_SP)))
314 return NIX_TM_TLX_SP_PRIO_MAX - 1;
/* Validate a new node's priority against its siblings:
 * - must not exceed the level's max priority,
 * - at most one sibling per priority (except the single RR group),
 * - no holes in the priority sequence.
 * Returns 0 on success or a NIX_ERR_TM_* error code.
 * NOTE(review): the sibling NULL-parent check, the RR-group counting that
 * feeds the MULTIPLE_RR_GROUPS error, and the final `return 0;` are on
 * lines missing from this chunk.
 */
318 nix_tm_validate_prio(struct nix *nix, uint32_t lvl, uint32_t parent_id,
319 uint32_t priority, enum roc_nix_tm_tree tree)
321 uint8_t priorities[NIX_TM_TLX_SP_PRIO_MAX];
322 struct nix_tm_node_list *list;
323 struct nix_tm_node *node;
327 list = nix_tm_node_list(nix, tree);
328 /* Validate priority against max */
329 if (priority > nix_tm_max_prio(nix, nix_tm_lvl2nix(nix, lvl - 1)))
330 return NIX_ERR_TM_PRIO_EXCEEDED;
332 if (parent_id == ROC_NIX_TM_NODE_ID_INVALID)
335 memset(priorities, 0, sizeof(priorities));
336 priorities[priority] = 1;
338 TAILQ_FOREACH(node, list, node) {
342 if (node->parent->id != parent_id)
345 priorities[node->priority]++;
348 for (i = 0; i < NIX_TM_TLX_SP_PRIO_MAX; i++)
349 if (priorities[i] > 1)
352 /* At max, one rr groups per parent */
354 return NIX_ERR_TM_MULTIPLE_RR_GROUPS;
356 /* Check for previous priority to avoid holes in priorities */
357 if (priority && !priorities[priority - 1])
358 return NIX_ERR_TM_PRIO_ORDER;
/* Check that every child of `parent` in `list` holds a HW resource
 * (NIX_TM_NODE_HWRES flag).
 * NOTE(review): the `continue`, the failure `return false;` and the final
 * `return true;` are on lines missing from this chunk.
 */
364 nix_tm_child_res_valid(struct nix_tm_node_list *list,
365 struct nix_tm_node *parent)
367 struct nix_tm_node *child;
369 TAILQ_FOREACH(child, list, node) {
370 if (child->parent != parent)
372 if (!(child->flags & NIX_TM_NODE_HWRES))
/* Prepare the default register writes for a TL1 root schq: DWRR quantum,
 * topology (default RR prio), and CIR (shaping disabled — the CIR regval
 * line is missing from this chunk). Fills reg[]/regval[] pairs; the `k`
 * counter increments between entries are also on missing lines, as is the
 * returned count.
 */
379 nix_tm_tl1_default_prep(uint32_t schq, volatile uint64_t *reg,
380 volatile uint64_t *regval)
385 * Default config for TL1.
386 * For VF this is always ignored.
388 plt_tm_dbg("Default config for main root %s(%u)",
389 nix_tm_hwlvl2str(NIX_TXSCH_LVL_TL1), schq);
391 /* Set DWRR quantum */
392 reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
393 regval[k] = NIX_TM_TL1_DFLT_RR_QTM;
396 reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
397 regval[k] = (NIX_TM_TL1_DFLT_RR_PRIO << 1);
400 reg[k] = NIX_AF_TL1X_CIR(schq);
/* Prepare topology and link-config register writes for one TM node:
 * per HW level, set the PARENT register, the TOPOLOGY register
 * (prio-anchor child in bits [32+], rr_prio at bit 1), and — for TL3/TL2
 * at the link-config level, or TL4 on SDP — the link channel config.
 * Fills reg[]/regval[]/regval_mask[] arrays.
 * NOTE(review): this chunk is missing many lines — `k++` increments,
 * `break` statements, the schq/link/tree initializations, the switch
 * header, and the returned count — so only the visible register choices
 * are documented here.
 */
408 nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
409 volatile uint64_t *reg, volatile uint64_t *regval,
410 volatile uint64_t *regval_mask)
412 struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
413 uint8_t k = 0, hw_lvl, parent_lvl;
414 uint64_t parent = 0, child = 0;
415 enum roc_nix_tm_tree tree;
416 uint32_t rr_prio, schq;
417 uint16_t link, relchan;
421 hw_lvl = node->hw_lvl;
422 parent_lvl = hw_lvl + 1;
423 rr_prio = node->rr_prio;
425 /* Root node will not have a parent node */
426 if (hw_lvl == nix->tm_root_lvl)
427 parent = node->parent_hw_id;
429 parent = node->parent->hw_id;
432 relchan = nix_tm_relchan_get(nix);
434 if (hw_lvl != NIX_TXSCH_LVL_SMQ)
435 child = nix_tm_find_prio_anchor(nix, node->id, tree);
437 /* Override default rr_prio when TL1
438 * Static Priority is disabled
440 if (hw_lvl == NIX_TXSCH_LVL_TL1 && nix->tm_flags & NIX_TM_TL1_NO_SP) {
441 rr_prio = NIX_TM_TL1_DFLT_RR_PRIO;
445 plt_tm_dbg("Topology config node %s(%u)->%s(%" PRIu64 ") lvl %u, id %u"
446 " prio_anchor %" PRIu64 " rr_prio %u (%p)",
447 nix_tm_hwlvl2str(hw_lvl), schq, nix_tm_hwlvl2str(parent_lvl),
448 parent, node->lvl, node->id, child, rr_prio, node);
450 /* Prepare Topology and Link config */
452 case NIX_TXSCH_LVL_SMQ:
454 /* Set xoff which will be cleared later */
455 reg[k] = NIX_AF_SMQX_CFG(schq);
456 regval[k] = (BIT_ULL(50) | NIX_MIN_HW_FRS |
457 ((nix->mtu & 0xFFFF) << 8));
458 /* Maximum Vtag insertion size as a multiple of four bytes */
459 if (roc_nix->hw_vlan_ins)
460 regval[k] |= (0x2ULL << 36);
461 regval_mask[k] = ~(BIT_ULL(50) | GENMASK_ULL(6, 0) |
462 GENMASK_ULL(23, 8) | GENMASK_ULL(38, 36));
465 /* Parent and schedule conf */
466 reg[k] = NIX_AF_MDQX_PARENT(schq);
467 regval[k] = parent << 16;
471 case NIX_TXSCH_LVL_TL4:
472 /* Parent and schedule conf */
473 reg[k] = NIX_AF_TL4X_PARENT(schq);
474 regval[k] = parent << 16;
477 reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
478 regval[k] = (child << 32) | (rr_prio << 1);
481 /* Configure TL4 to send to SDP channel instead of CGX/LBK */
483 reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
484 regval[k] = BIT_ULL(12);
488 case NIX_TXSCH_LVL_TL3:
489 /* Parent and schedule conf */
490 reg[k] = NIX_AF_TL3X_PARENT(schq);
491 regval[k] = parent << 16;
494 reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
495 regval[k] = (child << 32) | (rr_prio << 1);
498 /* Link configuration */
499 if (!nix->sdp_link &&
500 nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
501 reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
502 regval[k] = BIT_ULL(12) | relchan;
507 case NIX_TXSCH_LVL_TL2:
508 /* Parent and schedule conf */
509 reg[k] = NIX_AF_TL2X_PARENT(schq);
510 regval[k] = parent << 16;
513 reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
514 regval[k] = (child << 32) | (rr_prio << 1);
517 /* Link configuration */
518 if (!nix->sdp_link &&
519 nix->tm_link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
520 reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
521 regval[k] = BIT_ULL(12) | relchan;
526 case NIX_TXSCH_LVL_TL1:
527 reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
528 regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
/* Prepare the SCHEDULE register write for a node: strict priority in
 * bits [24+] plus RR quantum derived from the node weight. TL2 children
 * of the root fall back to the default RR prio when TL1 access is absent
 * or TL1 SP is disabled; TL1 itself takes quantum only (no SP field).
 * NOTE(review): the rr_quantum declaration, switch header, `k++`/`break`
 * lines, and returned count are missing from this chunk.
 */
538 nix_tm_sched_reg_prep(struct nix *nix, struct nix_tm_node *node,
539 volatile uint64_t *reg, volatile uint64_t *regval)
541 uint64_t strict_prio = node->priority;
542 uint32_t hw_lvl = node->hw_lvl;
543 uint32_t schq = node->hw_id;
547 rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
549 /* For children to root, strict prio is default if either
550 * device root is TL2 or TL1 Static Priority is disabled.
552 if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
553 (!nix_tm_have_tl1_access(nix) || nix->tm_flags & NIX_TM_TL1_NO_SP))
554 strict_prio = NIX_TM_TL1_DFLT_RR_PRIO;
556 plt_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
557 "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
558 nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
559 strict_prio, rr_quantum, node);
562 case NIX_TXSCH_LVL_SMQ:
563 reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
564 regval[k] = (strict_prio << 24) | rr_quantum;
568 case NIX_TXSCH_LVL_TL4:
569 reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
570 regval[k] = (strict_prio << 24) | rr_quantum;
574 case NIX_TXSCH_LVL_TL3:
575 reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
576 regval[k] = (strict_prio << 24) | rr_quantum;
580 case NIX_TXSCH_LVL_TL2:
581 reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
582 regval[k] = (strict_prio << 24) | rr_quantum;
586 case NIX_TXSCH_LVL_TL1:
587 reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
588 regval[k] = rr_quantum;
/* Prepare shaper register writes for a node from its profile: PIR and
 * CIR registers (enable bit 0 set only when both rate and burst are
 * non-zero; the disabled-value lines are missing from this chunk) plus
 * the SHAPE register (length adjust, RED algorithm at bit 9, pkt mode at
 * bit 24). TL1 has CIR only, no PIR and no RED algo.
 * NOTE(review): `k++`, `break`, the adjust default, the profile-NULL
 * handling, and the returned count are on lines missing from this chunk.
 * The TL3 SHAPE regval visibly omits the `<< 24` shift on pkt_mode that
 * the other levels have (original line 682) — flagged for verification
 * against the full file, as the fix cannot be confirmed from this chunk.
 */
598 nix_tm_shaper_reg_prep(struct nix_tm_node *node,
599 struct nix_tm_shaper_profile *profile,
600 volatile uint64_t *reg, volatile uint64_t *regval)
602 struct nix_tm_shaper_data cir, pir;
603 uint32_t schq = node->hw_id;
607 memset(&cir, 0, sizeof(cir));
608 memset(&pir, 0, sizeof(pir));
609 nix_tm_shaper_conf_get(profile, &cir, &pir);
614 adjust = profile->pkt_len_adj;
616 plt_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
617 "pir %" PRIu64 "(%" PRIu64 "B),"
618 " cir %" PRIu64 "(%" PRIu64 "B)"
619 "adjust 0x%" PRIx64 "(pktmode %u) (%p)",
620 nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
621 pir.rate, pir.burst, cir.rate, cir.burst, adjust,
622 node->pkt_mode, node);
624 switch (node->hw_lvl) {
625 case NIX_TXSCH_LVL_SMQ:
626 /* Configure PIR, CIR */
627 reg[k] = NIX_AF_MDQX_PIR(schq);
628 regval[k] = (pir.rate && pir.burst) ?
629 (nix_tm_shaper2regval(&pir) | 1) :
633 reg[k] = NIX_AF_MDQX_CIR(schq);
634 regval[k] = (cir.rate && cir.burst) ?
635 (nix_tm_shaper2regval(&cir) | 1) :
639 /* Configure RED ALG */
640 reg[k] = NIX_AF_MDQX_SHAPE(schq);
641 regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
642 (uint64_t)node->pkt_mode << 24);
645 case NIX_TXSCH_LVL_TL4:
646 /* Configure PIR, CIR */
647 reg[k] = NIX_AF_TL4X_PIR(schq);
648 regval[k] = (pir.rate && pir.burst) ?
649 (nix_tm_shaper2regval(&pir) | 1) :
653 reg[k] = NIX_AF_TL4X_CIR(schq);
654 regval[k] = (cir.rate && cir.burst) ?
655 (nix_tm_shaper2regval(&cir) | 1) :
659 /* Configure RED algo */
660 reg[k] = NIX_AF_TL4X_SHAPE(schq);
661 regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
662 (uint64_t)node->pkt_mode << 24);
665 case NIX_TXSCH_LVL_TL3:
666 /* Configure PIR, CIR */
667 reg[k] = NIX_AF_TL3X_PIR(schq);
668 regval[k] = (pir.rate && pir.burst) ?
669 (nix_tm_shaper2regval(&pir) | 1) :
673 reg[k] = NIX_AF_TL3X_CIR(schq);
674 regval[k] = (cir.rate && cir.burst) ?
675 (nix_tm_shaper2regval(&cir) | 1) :
679 /* Configure RED algo */
680 reg[k] = NIX_AF_TL3X_SHAPE(schq);
681 regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
682 (uint64_t)node->pkt_mode);
686 case NIX_TXSCH_LVL_TL2:
687 /* Configure PIR, CIR */
688 reg[k] = NIX_AF_TL2X_PIR(schq);
689 regval[k] = (pir.rate && pir.burst) ?
690 (nix_tm_shaper2regval(&pir) | 1) :
694 reg[k] = NIX_AF_TL2X_CIR(schq);
695 regval[k] = (cir.rate && cir.burst) ?
696 (nix_tm_shaper2regval(&cir) | 1) :
700 /* Configure RED algo */
701 reg[k] = NIX_AF_TL2X_SHAPE(schq);
702 regval[k] = (adjust | (uint64_t)node->red_algo << 9 |
703 (uint64_t)node->pkt_mode << 24);
707 case NIX_TXSCH_LVL_TL1:
709 reg[k] = NIX_AF_TL1X_CIR(schq);
710 regval[k] = (cir.rate && cir.burst) ?
711 (nix_tm_shaper2regval(&cir) | 1) :
715 /* Configure length disable and adjust */
716 reg[k] = NIX_AF_TL1X_SHAPE(schq);
717 regval[k] = (adjust | (uint64_t)node->pkt_mode << 24);
/* Prepare the SW_XOFF register write for a node's HW level; `enable`
 * presumably selects the regval (the regval assignments, switch header,
 * `k++`/`break` lines and return are missing from this chunk — confirm
 * against the full file). */
726 nix_tm_sw_xoff_prep(struct nix_tm_node *node, bool enable,
727 volatile uint64_t *reg, volatile uint64_t *regval)
729 uint32_t hw_lvl = node->hw_lvl;
730 uint32_t schq = node->hw_id;
733 plt_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)",
734 nix_tm_hwlvl2str(hw_lvl), schq, node->lvl, node->id, enable,
740 case NIX_TXSCH_LVL_MDQ:
741 reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
744 case NIX_TXSCH_LVL_TL4:
745 reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
748 case NIX_TXSCH_LVL_TL3:
749 reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
752 case NIX_TXSCH_LVL_TL2:
753 reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
756 case NIX_TXSCH_LVL_TL1:
757 reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
767 /* Search for min rate in topology */
/* Walk all shaper profiles and return the minimum non-zero peak/commit
 * rate, defaulting to 1 Gbps when no profile constrains it.
 * NOTE(review): the `return rate_min;` line is missing from this chunk. */
769 nix_tm_shaper_profile_rate_min(struct nix *nix)
771 struct nix_tm_shaper_profile *profile;
772 uint64_t rate_min = 1E9; /* 1 Gbps */
774 TAILQ_FOREACH(profile, &nix->shaper_profile_list, shaper) {
775 if (profile->peak.rate && profile->peak.rate < rate_min)
776 rate_min = profile->peak.rate;
778 if (profile->commit.rate && profile->commit.rate < rate_min)
779 rate_min = profile->commit.rate;
/* Count available schq resources at a HW level by popcounting the slabs
 * of the (contiguous or discontiguous) free bitmap, scanning until the
 * scan wraps back to its start position.
 * NOTE(review): the `count`/`slab` declarations, the `start_pos = pos`
 * capture, the loop header, and the `return count;` are on lines missing
 * from this chunk. */
785 nix_tm_resource_avail(struct nix *nix, uint8_t hw_lvl, bool contig)
787 uint32_t pos = 0, start_pos = 0;
788 struct plt_bitmap *bmp;
792 bmp = contig ? nix->schq_contig_bmp[hw_lvl] : nix->schq_bmp[hw_lvl];
793 plt_bitmap_scan_init(bmp);
795 if (!plt_bitmap_scan(bmp, &pos, &slab))
801 count += __builtin_popcountll(slab);
802 if (!plt_bitmap_scan(bmp, &pos, &slab))
804 } while (pos != start_pos);
/* Estimate additional schq resources needed to realize a tree: walk
 * parents from TL1 down, accumulate per-level discontiguous (schq[]) and
 * contiguous (schq_contig[]) child counts for parents flagged
 * child_realloc, account for the root, then subtract what is already
 * available per level. Returns the total outstanding count (via `cnt`;
 * the final return and the `cnt += schq[hw_lvl]` / `else` lines are
 * missing from this chunk). */
810 nix_tm_resource_estimate(struct nix *nix, uint16_t *schq_contig, uint16_t *schq,
811 enum roc_nix_tm_tree tree)
813 struct nix_tm_node_list *list;
814 uint8_t contig_cnt, hw_lvl;
815 struct nix_tm_node *parent;
816 uint16_t cnt = 0, avail;
818 list = nix_tm_node_list(nix, tree);
819 /* Walk through parents from TL1..TL4 */
820 for (hw_lvl = NIX_TXSCH_LVL_TL1; hw_lvl > 0; hw_lvl--) {
821 TAILQ_FOREACH(parent, list, node) {
822 if (hw_lvl != parent->hw_lvl)
825 /* Skip accounting for children whose
826 * parent does not indicate so.
828 if (!parent->child_realloc)
831 /* Count children needed */
832 schq[hw_lvl - 1] += parent->rr_num;
833 if (parent->max_prio != UINT32_MAX) {
834 contig_cnt = parent->max_prio + 1;
835 schq_contig[hw_lvl - 1] += contig_cnt;
836 /* When we have SP + DWRR at a parent,
837 * we will always have a spare schq at rr prio
838 * location in contiguous queues. Hence reduce
839 * discontiguous count by 1.
841 if (parent->max_prio > 0 && parent->rr_num)
842 schq[hw_lvl - 1] -= 1;
847 schq[nix->tm_root_lvl] = 1;
848 if (!nix_tm_have_tl1_access(nix))
849 schq[NIX_TXSCH_LVL_TL1] = 1;
851 /* Now check for existing resources */
852 for (hw_lvl = 0; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
853 avail = nix_tm_resource_avail(nix, hw_lvl, false);
854 if (schq[hw_lvl] <= avail)
857 schq[hw_lvl] -= avail;
859 /* For contiguous queues, realloc everything */
860 avail = nix_tm_resource_avail(nix, hw_lvl, true);
861 if (schq_contig[hw_lvl] <= avail)
862 schq_contig[hw_lvl] = 0;
865 cnt += schq_contig[hw_lvl];
867 plt_tm_dbg("Estimate resources needed for %s: dis %u cont %u",
868 nix_tm_hwlvl2str(hw_lvl), schq[hw_lvl],
869 schq_contig[hw_lvl]);
/* Count leaf nodes (id < nb_tx_queues) in the user tree.
 * NOTE(review): the `leaf_cnt++` and `return leaf_cnt;` lines are missing
 * from this chunk. */
876 roc_nix_tm_leaf_cnt(struct roc_nix *roc_nix)
878 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
879 struct nix_tm_node_list *list;
880 struct nix_tm_node *node;
881 uint16_t leaf_cnt = 0;
883 /* Count leafs only in user list */
884 list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
885 TAILQ_FOREACH(node, list, node) {
886 if (node->id < nix->nb_tx_queues)
/* Return the user level of a node in the user tree, or
 * NIX_ERR_TM_INVALID_NODE when not found.
 * NOTE(review): the NULL check and `return node->lvl;` lines are missing
 * from this chunk. */
894 roc_nix_tm_node_lvl(struct roc_nix *roc_nix, uint32_t node_id)
896 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
897 struct nix_tm_node *node;
899 node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
901 return NIX_ERR_TM_INVALID_NODE;
/* Public lookup of a user-tree node by id; returns NULL when not found
 * (nix_tm_node_search result cast to the public type). */
906 struct roc_nix_tm_node *
907 roc_nix_tm_node_get(struct roc_nix *roc_nix, uint32_t node_id)
909 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
910 struct nix_tm_node *node;
912 node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
913 return (struct roc_nix_tm_node *)node;
/* Iterate the user tree: NULL `__prev` yields the list head, otherwise
 * the next node. Nodes from other trees terminate iteration.
 * NOTE(review): the `if (!prev)` guard and the return on the tree
 * mismatch are on lines missing from this chunk. */
916 struct roc_nix_tm_node *
917 roc_nix_tm_node_next(struct roc_nix *roc_nix, struct roc_nix_tm_node *__prev)
919 struct nix_tm_node *prev = (struct nix_tm_node *)__prev;
920 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
921 struct nix_tm_node_list *list;
923 list = nix_tm_node_list(nix, ROC_NIX_TM_USER);
925 /* HEAD of the list */
927 return (struct roc_nix_tm_node *)TAILQ_FIRST(list);
930 if (prev->tree != ROC_NIX_TM_USER)
933 return (struct roc_nix_tm_node *)TAILQ_NEXT(prev, node);
/* Public lookup of a shaper profile by id; returns NULL when not found
 * (internal search result cast to the public type). */
936 struct roc_nix_tm_shaper_profile *
937 roc_nix_tm_shaper_profile_get(struct roc_nix *roc_nix, uint32_t profile_id)
939 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
940 struct nix_tm_shaper_profile *profile;
942 profile = nix_tm_shaper_profile_search(nix, profile_id);
943 return (struct roc_nix_tm_shaper_profile *)profile;
/* Iterate shaper profiles: NULL `__prev` yields the list head, otherwise
 * the next profile in the list.
 * NOTE(review): the `if (!prev)` guard line is missing from this chunk. */
946 struct roc_nix_tm_shaper_profile *
947 roc_nix_tm_shaper_profile_next(struct roc_nix *roc_nix,
948 struct roc_nix_tm_shaper_profile *__prev)
950 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
951 struct nix_tm_shaper_profile_list *list;
952 struct nix_tm_shaper_profile *prev;
954 prev = (struct nix_tm_shaper_profile *)__prev;
955 list = &nix->shaper_profile_list;
957 /* HEAD of the list */
959 return (struct roc_nix_tm_shaper_profile *)TAILQ_FIRST(list);
961 return (struct roc_nix_tm_shaper_profile *)TAILQ_NEXT(prev, shaper);
/* Allocate a zeroed TM node and record plt_free as its destructor so
 * nix_tm_node_free() can release it uniformly.
 * NOTE(review): the allocation-failure check and `return node;` lines are
 * missing from this chunk. */
965 nix_tm_node_alloc(void)
967 struct nix_tm_node *node;
969 node = plt_zmalloc(sizeof(struct nix_tm_node), 0);
973 node->free_fn = plt_free;
/* Free a TM node through its recorded free_fn; NULL node or missing
 * free_fn is a no-op (the early-return body is on a missing line). */
978 nix_tm_node_free(struct nix_tm_node *node)
980 if (!node || node->free_fn == NULL)
983 (node->free_fn)(node);
/* Allocate a zeroed shaper profile and record plt_free as its destructor.
 * NOTE(review): the failure check and `return profile;` lines are missing
 * from this chunk. */
986 struct nix_tm_shaper_profile *
987 nix_tm_shaper_profile_alloc(void)
989 struct nix_tm_shaper_profile *profile;
991 profile = plt_zmalloc(sizeof(struct nix_tm_shaper_profile), 0);
995 profile->free_fn = plt_free;
/* Free a shaper profile through its recorded free_fn; NULL profile or
 * missing free_fn is a no-op (the early-return body is on a missing
 * line). */
1000 nix_tm_shaper_profile_free(struct nix_tm_shaper_profile *profile)
1002 if (!profile || !profile->free_fn)
1005 (profile->free_fn)(profile);
/* Fetch (and optionally clear) TL1 per-color packet/byte stats for a
 * user-tree node via the AF mailbox. Only TL1 nodes are supported; nodes
 * without HW resources yield zeroed stats. A read request lists the eight
 * TL1 stat registers; on success the responses populate n_stats. When
 * `clear` is set, a second mailbox request rewrites the same registers
 * (presumably with write semantics and zero values — the regval/write
 * setup lines are missing from this chunk).
 * NOTE(review): several lines are absent here — the node NULL check, the
 * schq/i initializations, req NULL checks, `req->num_regs`, the
 * read/clear mode flags, and the early `return 0` when !clear — so the
 * exact request framing cannot be confirmed from this chunk.
 */
1009 roc_nix_tm_node_stats_get(struct roc_nix *roc_nix, uint32_t node_id, bool clear,
1010 struct roc_nix_tm_node_stats *n_stats)
1012 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
1013 struct mbox *mbox = (&nix->dev)->mbox;
1014 struct nix_txschq_config *req, *rsp;
1015 struct nix_tm_node *node;
1019 node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
1021 return NIX_ERR_TM_INVALID_NODE;
1023 if (node->hw_lvl != NIX_TXSCH_LVL_TL1)
1024 return NIX_ERR_OP_NOTSUP;
1027 /* Skip fetch if not requested */
1031 memset(n_stats, 0, sizeof(struct roc_nix_tm_node_stats));
1032 /* Check if node has HW resource */
1033 if (!(node->flags & NIX_TM_NODE_HWRES))
1036 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
1038 req->lvl = NIX_TXSCH_LVL_TL1;
1041 req->reg[i++] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
1042 req->reg[i++] = NIX_AF_TL1X_DROPPED_BYTES(schq);
1043 req->reg[i++] = NIX_AF_TL1X_GREEN_PACKETS(schq);
1044 req->reg[i++] = NIX_AF_TL1X_GREEN_BYTES(schq);
1045 req->reg[i++] = NIX_AF_TL1X_YELLOW_PACKETS(schq);
1046 req->reg[i++] = NIX_AF_TL1X_YELLOW_BYTES(schq);
1047 req->reg[i++] = NIX_AF_TL1X_RED_PACKETS(schq);
1048 req->reg[i++] = NIX_AF_TL1X_RED_BYTES(schq);
1051 rc = mbox_process_msg(mbox, (void **)&rsp);
1056 n_stats->stats[ROC_NIX_TM_NODE_PKTS_DROPPED] = rsp->regval[0];
1057 n_stats->stats[ROC_NIX_TM_NODE_BYTES_DROPPED] = rsp->regval[1];
1058 n_stats->stats[ROC_NIX_TM_NODE_GREEN_PKTS] = rsp->regval[2];
1059 n_stats->stats[ROC_NIX_TM_NODE_GREEN_BYTES] = rsp->regval[3];
1060 n_stats->stats[ROC_NIX_TM_NODE_YELLOW_PKTS] = rsp->regval[4];
1061 n_stats->stats[ROC_NIX_TM_NODE_YELLOW_BYTES] = rsp->regval[5];
1062 n_stats->stats[ROC_NIX_TM_NODE_RED_PKTS] = rsp->regval[6];
1063 n_stats->stats[ROC_NIX_TM_NODE_RED_BYTES] = rsp->regval[7];
1069 /* Clear all the stats */
1070 req = mbox_alloc_msg_nix_txschq_cfg(mbox);
1071 req->lvl = NIX_TXSCH_LVL_TL1;
1073 req->reg[i++] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
1074 req->reg[i++] = NIX_AF_TL1X_DROPPED_BYTES(schq);
1075 req->reg[i++] = NIX_AF_TL1X_GREEN_PACKETS(schq);
1076 req->reg[i++] = NIX_AF_TL1X_GREEN_BYTES(schq);
1077 req->reg[i++] = NIX_AF_TL1X_YELLOW_PACKETS(schq);
1078 req->reg[i++] = NIX_AF_TL1X_YELLOW_BYTES(schq);
1079 req->reg[i++] = NIX_AF_TL1X_RED_PACKETS(schq);
1080 req->reg[i++] = NIX_AF_TL1X_RED_BYTES(schq);
1083 return mbox_process_msg(mbox, (void **)&rsp);