1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #include <rte_malloc.h>
7 #include "otx2_ethdev.h"
10 /* Use last LVL_CNT nodes as default nodes */
11 #define NIX_DEFAULT_NODE_ID_START (RTE_TM_NODE_ID_NULL - NIX_TXSCH_LVL_CNT)
13 enum otx2_tm_node_level {
24 uint64_t shaper2regval(struct shaper_params *shaper)
26 return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
27 (shaper->div_exp << 13) | (shaper->exponent << 9) |
28 (shaper->mantissa << 1);
32 otx2_nix_get_link(struct otx2_eth_dev *dev)
34 int link = 13 /* SDP */;
38 lmac_chan = dev->tx_chan_base;
41 if (lmac_chan >= 0x800) {
42 map = lmac_chan & 0x7FF;
43 link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
44 } else if (lmac_chan < 0x700) {
53 nix_get_relchan(struct otx2_eth_dev *dev)
55 return dev->tx_chan_base & 0xff;
59 nix_tm_have_tl1_access(struct otx2_eth_dev *dev)
61 bool is_lbk = otx2_dev_is_lbk(dev);
62 return otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) && !is_lbk;
66 nix_tm_is_leaf(struct otx2_eth_dev *dev, int lvl)
68 if (nix_tm_have_tl1_access(dev))
69 return (lvl == OTX2_TM_LVL_QUEUE);
71 return (lvl == OTX2_TM_LVL_SCH4);
75 find_prio_anchor(struct otx2_eth_dev *dev, uint32_t node_id)
77 struct otx2_nix_tm_node *child_node;
79 TAILQ_FOREACH(child_node, &dev->node_list, node) {
80 if (!child_node->parent)
82 if (!(child_node->parent->id == node_id))
84 if (child_node->priority == child_node->parent->rr_prio)
86 return child_node->hw_id - child_node->priority;
92 static struct otx2_nix_tm_shaper_profile *
93 nix_tm_shaper_profile_search(struct otx2_eth_dev *dev, uint32_t shaper_id)
95 struct otx2_nix_tm_shaper_profile *tm_shaper_profile;
97 TAILQ_FOREACH(tm_shaper_profile, &dev->shaper_profile_list, shaper) {
98 if (tm_shaper_profile->shaper_profile_id == shaper_id)
99 return tm_shaper_profile;
104 static inline uint64_t
105 shaper_rate_to_nix(uint64_t value, uint64_t *exponent_p,
106 uint64_t *mantissa_p, uint64_t *div_exp_p)
108 uint64_t div_exp, exponent, mantissa;
110 /* Boundary checks */
111 if (value < MIN_SHAPER_RATE ||
112 value > MAX_SHAPER_RATE)
115 if (value <= SHAPER_RATE(0, 0, 0)) {
116 /* Calculate rate div_exp and mantissa using
117 * the following formula:
119 * value = (2E6 * (256 + mantissa)
120 * / ((1 << div_exp) * 256))
124 mantissa = MAX_RATE_MANTISSA;
126 while (value < (NIX_SHAPER_RATE_CONST / (1 << div_exp)))
130 ((NIX_SHAPER_RATE_CONST * (256 + mantissa)) /
131 ((1 << div_exp) * 256)))
134 /* Calculate rate exponent and mantissa using
135 * the following formula:
137 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
141 exponent = MAX_RATE_EXPONENT;
142 mantissa = MAX_RATE_MANTISSA;
144 while (value < (NIX_SHAPER_RATE_CONST * (1 << exponent)))
147 while (value < ((NIX_SHAPER_RATE_CONST *
148 ((256 + mantissa) << exponent)) / 256))
152 if (div_exp > MAX_RATE_DIV_EXP ||
153 exponent > MAX_RATE_EXPONENT || mantissa > MAX_RATE_MANTISSA)
157 *div_exp_p = div_exp;
159 *exponent_p = exponent;
161 *mantissa_p = mantissa;
163 /* Calculate real rate value */
164 return SHAPER_RATE(exponent, mantissa, div_exp);
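/*
 * Editorial example (not part of the original source): for a 1 Gbps rate
 * the caller passes value = 1E9 (rates are converted to bits/sec before
 * reaching this helper). The loops above then settle on exponent = 8,
 * mantissa = 244, div_exp = 0, since, assuming NIX_SHAPER_RATE_CONST is
 * the 2E6 constant used in the formula comments above:
 *
 *   SHAPER_RATE(8, 244, 0) = (2E6 * ((256 + 244) << 8)) / 256 = 1E9
 */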
167 static inline uint64_t
168 shaper_burst_to_nix(uint64_t value, uint64_t *exponent_p,
169 uint64_t *mantissa_p)
171 uint64_t exponent, mantissa;
173 if (value < MIN_SHAPER_BURST || value > MAX_SHAPER_BURST)
176 /* Calculate burst exponent and mantissa using
177 * the following formula:
179 * value = (((256 + mantissa) << (exponent + 1)
183 exponent = MAX_BURST_EXPONENT;
184 mantissa = MAX_BURST_MANTISSA;
186 while (value < (1ull << (exponent + 1)))
189 while (value < ((256 + mantissa) << (exponent + 1)) / 256)
192 if (exponent > MAX_BURST_EXPONENT || mantissa > MAX_BURST_MANTISSA)
196 *exponent_p = exponent;
198 *mantissa_p = mantissa;
200 return SHAPER_BURST(exponent, mantissa);
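/*
 * Editorial example (an assumption, not from the original source): a 64 KB
 * burst (value = 65536) encodes as exponent = 15, mantissa = 0, because
 *
 *   SHAPER_BURST(15, 0) = ((256 + 0) << (15 + 1)) / 256 = 65536
 */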
204 shaper_config_to_nix(struct otx2_nix_tm_shaper_profile *profile,
205 struct shaper_params *cir,
206 struct shaper_params *pir)
208 struct rte_tm_shaper_params *param = &profile->params;
213 /* Calculate CIR exponent and mantissa */
214 if (param->committed.rate)
215 cir->rate = shaper_rate_to_nix(param->committed.rate,
220 /* Calculate PIR exponent and mantissa */
221 if (param->peak.rate)
222 pir->rate = shaper_rate_to_nix(param->peak.rate,
227 /* Calculate CIR burst exponent and mantissa */
228 if (param->committed.size)
229 cir->burst = shaper_burst_to_nix(param->committed.size,
230 &cir->burst_exponent,
231 &cir->burst_mantissa);
233 /* Calculate PIR burst exponent and mantissa */
234 if (param->peak.size)
235 pir->burst = shaper_burst_to_nix(param->peak.size,
236 &pir->burst_exponent,
237 &pir->burst_mantissa);
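/*
 * Note (editorial): cir/pir hold the committed and peak halves of the
 * profile respectively; a single-rate profile ends up in the peak (PIR)
 * fields because otx2_nix_tm_shaper_profile_add() below moves a
 * committed-only configuration over to peak before it is used here.
 */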
241 populate_tm_tl1_default(struct otx2_eth_dev *dev, uint32_t schq)
243 struct otx2_mbox *mbox = dev->mbox;
244 struct nix_txschq_config *req;
247 * Default config for TL1.
248 * For VF this is always ignored.
251 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
252 req->lvl = NIX_TXSCH_LVL_TL1;
254 /* Set DWRR quantum */
255 req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
256 req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
259 req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
260 req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
263 req->reg[2] = NIX_AF_TL1X_CIR(schq);
267 return otx2_mbox_process(mbox);
271 prepare_tm_sched_reg(struct otx2_eth_dev *dev,
272 struct otx2_nix_tm_node *tm_node,
273 volatile uint64_t *reg, volatile uint64_t *regval)
275 uint64_t strict_prio = tm_node->priority;
276 uint32_t hw_lvl = tm_node->hw_lvl;
277 uint32_t schq = tm_node->hw_id;
281 rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
283 /* For children of the root, strict priority is the default if either
284 * the device root is TL2 or TL1 Static Priority is disabled.
286 if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
287 (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
288 dev->tm_flags & NIX_TM_TL1_NO_SP))
289 strict_prio = TXSCH_TL1_DFLT_RR_PRIO;
291 otx2_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
292 "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
293 nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
294 tm_node->id, strict_prio, rr_quantum, tm_node);
297 case NIX_TXSCH_LVL_SMQ:
298 reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
299 regval[k] = (strict_prio << 24) | rr_quantum;
303 case NIX_TXSCH_LVL_TL4:
304 reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
305 regval[k] = (strict_prio << 24) | rr_quantum;
309 case NIX_TXSCH_LVL_TL3:
310 reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
311 regval[k] = (strict_prio << 24) | rr_quantum;
315 case NIX_TXSCH_LVL_TL2:
316 reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
317 regval[k] = (strict_prio << 24) | rr_quantum;
321 case NIX_TXSCH_LVL_TL1:
322 reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
323 regval[k] = rr_quantum;
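/*
 * Note (editorial): for SMQ/MDQ and TL4..TL2 the *_SCHEDULE register packs
 * the strict priority from bit 24 upward and the DWRR quantum in the low
 * bits, while TL1 carries only the quantum. NIX_TM_WEIGHT_TO_RR_QUANTUM()
 * is assumed to scale the rte_tm weight into the hardware quantum range.
 */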
333 prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
334 struct otx2_nix_tm_shaper_profile *profile,
335 volatile uint64_t *reg, volatile uint64_t *regval)
337 struct shaper_params cir, pir;
338 uint32_t schq = tm_node->hw_id;
341 memset(&cir, 0, sizeof(cir));
342 memset(&pir, 0, sizeof(pir));
343 shaper_config_to_nix(profile, &cir, &pir);
345 otx2_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
346 "pir %" PRIu64 "(%" PRIu64 "B),"
347 " cir %" PRIu64 "(%" PRIu64 "B) (%p)",
348 nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
349 tm_node->id, pir.rate, pir.burst,
350 cir.rate, cir.burst, tm_node);
352 switch (tm_node->hw_lvl) {
353 case NIX_TXSCH_LVL_SMQ:
354 /* Configure PIR, CIR */
355 reg[k] = NIX_AF_MDQX_PIR(schq);
356 regval[k] = (pir.rate && pir.burst) ?
357 (shaper2regval(&pir) | 1) : 0;
360 reg[k] = NIX_AF_MDQX_CIR(schq);
361 regval[k] = (cir.rate && cir.burst) ?
362 (shaper2regval(&cir) | 1) : 0;
365 /* Configure RED ALG */
366 reg[k] = NIX_AF_MDQX_SHAPE(schq);
367 regval[k] = ((uint64_t)tm_node->red_algo << 9);
370 case NIX_TXSCH_LVL_TL4:
371 /* Configure PIR, CIR */
372 reg[k] = NIX_AF_TL4X_PIR(schq);
373 regval[k] = (pir.rate && pir.burst) ?
374 (shaper2regval(&pir) | 1) : 0;
377 reg[k] = NIX_AF_TL4X_CIR(schq);
378 regval[k] = (cir.rate && cir.burst) ?
379 (shaper2regval(&cir) | 1) : 0;
382 /* Configure RED algo */
383 reg[k] = NIX_AF_TL4X_SHAPE(schq);
384 regval[k] = ((uint64_t)tm_node->red_algo << 9);
387 case NIX_TXSCH_LVL_TL3:
388 /* Configure PIR, CIR */
389 reg[k] = NIX_AF_TL3X_PIR(schq);
390 regval[k] = (pir.rate && pir.burst) ?
391 (shaper2regval(&pir) | 1) : 0;
394 reg[k] = NIX_AF_TL3X_CIR(schq);
395 regval[k] = (cir.rate && cir.burst) ?
396 (shaper2regval(&cir) | 1) : 0;
399 /* Configure RED algo */
400 reg[k] = NIX_AF_TL3X_SHAPE(schq);
401 regval[k] = ((uint64_t)tm_node->red_algo << 9);
405 case NIX_TXSCH_LVL_TL2:
406 /* Configure PIR, CIR */
407 reg[k] = NIX_AF_TL2X_PIR(schq);
408 regval[k] = (pir.rate && pir.burst) ?
409 (shaper2regval(&pir) | 1) : 0;
412 reg[k] = NIX_AF_TL2X_CIR(schq);
413 regval[k] = (cir.rate && cir.burst) ?
414 (shaper2regval(&cir) | 1) : 0;
417 /* Configure RED algo */
418 reg[k] = NIX_AF_TL2X_SHAPE(schq);
419 regval[k] = ((uint64_t)tm_node->red_algo << 9);
423 case NIX_TXSCH_LVL_TL1:
425 reg[k] = NIX_AF_TL1X_CIR(schq);
426 regval[k] = (cir.rate && cir.burst) ?
427 (shaper2regval(&cir) | 1) : 0;
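/*
 * Note (editorial): bit 0 of the PIR/CIR registers is assumed to be the
 * shaper enable bit, hence the "| 1" when a non-zero rate and burst are
 * programmed; a zero regval leaves that shaper disabled. TL1 is given a
 * CIR only, the other levels get PIR, CIR and the RED algorithm.
 */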
436 prepare_tm_sw_xoff(struct otx2_nix_tm_node *tm_node, bool enable,
437 volatile uint64_t *reg, volatile uint64_t *regval)
439 uint32_t hw_lvl = tm_node->hw_lvl;
440 uint32_t schq = tm_node->hw_id;
443 otx2_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)",
444 nix_hwlvl2str(hw_lvl), schq, tm_node->lvl,
445 tm_node->id, enable, tm_node);
450 case NIX_TXSCH_LVL_MDQ:
451 reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
454 case NIX_TXSCH_LVL_TL4:
455 reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
458 case NIX_TXSCH_LVL_TL3:
459 reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
462 case NIX_TXSCH_LVL_TL2:
463 reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
466 case NIX_TXSCH_LVL_TL1:
467 reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
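/*
 * Note (editorial): the per-level SW_XOFF registers stop scheduling from
 * the given node; nix_clear_path_xoff(), nix_tm_node_suspend_resume() and
 * the shaper/parent update handlers below use this to pause a node or a
 * whole path around flush and reconfiguration.
 */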
478 populate_tm_reg(struct otx2_eth_dev *dev,
479 struct otx2_nix_tm_node *tm_node)
481 struct otx2_nix_tm_shaper_profile *profile;
482 uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
483 uint64_t regval[MAX_REGS_PER_MBOX_MSG];
484 uint64_t reg[MAX_REGS_PER_MBOX_MSG];
485 struct otx2_mbox *mbox = dev->mbox;
486 uint64_t parent = 0, child = 0;
487 uint32_t hw_lvl, rr_prio, schq;
488 struct nix_txschq_config *req;
492 memset(regval_mask, 0, sizeof(regval_mask));
493 profile = nix_tm_shaper_profile_search(dev,
494 tm_node->params.shaper_profile_id);
495 rr_prio = tm_node->rr_prio;
496 hw_lvl = tm_node->hw_lvl;
497 schq = tm_node->hw_id;
499 /* Root node will not have a parent node */
500 if (hw_lvl == dev->otx2_tm_root_lvl)
501 parent = tm_node->parent_hw_id;
503 parent = tm_node->parent->hw_id;
505 /* Do we need this trigger to configure TL1 */
506 if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
507 hw_lvl == dev->otx2_tm_root_lvl) {
508 rc = populate_tm_tl1_default(dev, parent);
513 if (hw_lvl != NIX_TXSCH_LVL_SMQ)
514 child = find_prio_anchor(dev, tm_node->id);
516 /* Override default rr_prio when TL1
517 * Static Priority is disabled
519 if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
520 dev->tm_flags & NIX_TM_TL1_NO_SP) {
521 rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
525 otx2_tm_dbg("Topology config node %s(%u)->%s(%"PRIu64") lvl %u, id %u"
526 " prio_anchor %"PRIu64" rr_prio %u (%p)",
527 nix_hwlvl2str(hw_lvl), schq, nix_hwlvl2str(hw_lvl + 1),
528 parent, tm_node->lvl, tm_node->id, child, rr_prio, tm_node);
530 /* Prepare Topology and Link config */
532 case NIX_TXSCH_LVL_SMQ:
534 /* Set xoff which will be cleared later */
535 reg[k] = NIX_AF_SMQX_CFG(schq);
536 regval[k] = BIT_ULL(50);
537 regval_mask[k] = ~BIT_ULL(50);
540 /* Parent and schedule conf */
541 reg[k] = NIX_AF_MDQX_PARENT(schq);
542 regval[k] = parent << 16;
546 case NIX_TXSCH_LVL_TL4:
547 /* Parent and schedule conf */
548 reg[k] = NIX_AF_TL4X_PARENT(schq);
549 regval[k] = parent << 16;
552 reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
553 regval[k] = (child << 32) | (rr_prio << 1);
556 /* Configure TL4 to send to SDP channel instead of CGX/LBK */
557 if (otx2_dev_is_sdp(dev)) {
558 reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
559 regval[k] = BIT_ULL(12);
563 case NIX_TXSCH_LVL_TL3:
564 /* Parent and schedule conf */
565 reg[k] = NIX_AF_TL3X_PARENT(schq);
566 regval[k] = parent << 16;
569 reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
570 regval[k] = (child << 32) | (rr_prio << 1);
573 /* Link configuration */
574 if (!otx2_dev_is_sdp(dev) &&
575 dev->link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
576 reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
577 otx2_nix_get_link(dev));
578 regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
583 case NIX_TXSCH_LVL_TL2:
584 /* Parent and schedule conf */
585 reg[k] = NIX_AF_TL2X_PARENT(schq);
586 regval[k] = parent << 16;
589 reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
590 regval[k] = (child << 32) | (rr_prio << 1);
593 /* Link configuration */
594 if (!otx2_dev_is_sdp(dev) &&
595 dev->link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
596 reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
597 otx2_nix_get_link(dev));
598 regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
603 case NIX_TXSCH_LVL_TL1:
604 reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
605 regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
611 /* Prepare schedule config */
612 k += prepare_tm_sched_reg(dev, tm_node, &reg[k], &regval[k]);
614 /* Prepare shaping config */
615 k += prepare_tm_shaper_reg(tm_node, profile, &reg[k], &regval[k]);
620 /* Copy and send config mbox */
621 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
625 otx2_mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
626 otx2_mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
627 otx2_mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);
629 rc = otx2_mbox_process(mbox);
635 otx2_err("Txschq cfg request failed for node %p, rc=%d", tm_node, rc);
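/*
 * Note (editorial): populate_tm_reg() batches the parent/topology, schedule
 * and shaper writes for one node into a single nix_txschq_config message;
 * regval_mask appears to select which bits of the existing register value
 * are preserved (used above to set only BIT(50) of NIX_AF_SMQX_CFG while
 * keeping the rest of the SMQ config intact).
 */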
641 nix_tm_txsch_reg_config(struct otx2_eth_dev *dev)
643 struct otx2_nix_tm_node *tm_node;
647 for (hw_lvl = 0; hw_lvl <= dev->otx2_tm_root_lvl; hw_lvl++) {
648 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
649 if (tm_node->hw_lvl == hw_lvl &&
650 tm_node->hw_lvl != NIX_TXSCH_LVL_CNT) {
651 rc = populate_tm_reg(dev, tm_node);
661 static struct otx2_nix_tm_node *
662 nix_tm_node_search(struct otx2_eth_dev *dev,
663 uint32_t node_id, bool user)
665 struct otx2_nix_tm_node *tm_node;
667 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
668 if (tm_node->id == node_id &&
669 (user == !!(tm_node->flags & NIX_TM_NODE_USER)))
676 check_rr(struct otx2_eth_dev *dev, uint32_t priority, uint32_t parent_id)
678 struct otx2_nix_tm_node *tm_node;
681 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
682 if (!tm_node->parent)
685 if (!(tm_node->parent->id == parent_id))
688 if (tm_node->priority == priority)
695 nix_tm_update_parent_info(struct otx2_eth_dev *dev)
697 struct otx2_nix_tm_node *tm_node_child;
698 struct otx2_nix_tm_node *tm_node;
699 struct otx2_nix_tm_node *parent;
703 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
704 if (!tm_node->parent)
706 /* Count the group of children of the same priority, i.e. the RR group */
707 parent = tm_node->parent;
708 priority = tm_node->priority;
709 rr_num = check_rr(dev, priority, parent->id);
711 /* Assuming that multiple RR groups are
712 * not configured based on capability.
715 parent->rr_prio = priority;
716 parent->rr_num = rr_num;
719 /* Find out static priority children that are not in RR */
720 TAILQ_FOREACH(tm_node_child, &dev->node_list, node) {
721 if (!tm_node_child->parent)
723 if (parent->id != tm_node_child->parent->id)
725 if (parent->max_prio == UINT32_MAX &&
726 tm_node_child->priority != parent->rr_prio)
727 parent->max_prio = 0;
729 if (parent->max_prio < tm_node_child->priority &&
730 parent->rr_prio != tm_node_child->priority)
731 parent->max_prio = tm_node_child->priority;
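/*
 * Note (editorial): after this pass every parent knows its single DWRR
 * group (rr_prio/rr_num, i.e. children sharing one priority) and the
 * highest strict priority among the remaining children (max_prio); both
 * feed the contiguous schq counts requested in nix_tm_count_req_schq().
 */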
739 nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,
740 uint32_t parent_node_id, uint32_t priority,
741 uint32_t weight, uint16_t hw_lvl,
742 uint16_t lvl, bool user,
743 struct rte_tm_node_params *params)
745 struct otx2_nix_tm_shaper_profile *profile;
746 struct otx2_nix_tm_node *tm_node, *parent_node;
747 struct shaper_params cir, pir;
750 profile_id = params->shaper_profile_id;
751 profile = nix_tm_shaper_profile_search(dev, profile_id);
753 parent_node = nix_tm_node_search(dev, parent_node_id, user);
755 tm_node = rte_zmalloc("otx2_nix_tm_node",
756 sizeof(struct otx2_nix_tm_node), 0);
761 tm_node->hw_lvl = hw_lvl;
763 /* Maintain minimum weight */
767 tm_node->id = node_id;
768 tm_node->priority = priority;
769 tm_node->weight = weight;
770 tm_node->rr_prio = 0xf;
771 tm_node->max_prio = UINT32_MAX;
772 tm_node->hw_id = UINT32_MAX;
775 tm_node->flags = NIX_TM_NODE_USER;
776 rte_memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
779 profile->reference_count++;
781 memset(&cir, 0, sizeof(cir));
782 memset(&pir, 0, sizeof(pir));
783 shaper_config_to_nix(profile, &cir, &pir);
785 tm_node->parent = parent_node;
786 tm_node->parent_hw_id = UINT32_MAX;
787 /* C0 doesn't support STALL when both PIR & CIR are enabled */
788 if (lvl < OTX2_TM_LVL_QUEUE &&
789 otx2_dev_is_96xx_Cx(dev) &&
790 pir.rate && cir.rate)
791 tm_node->red_algo = NIX_REDALG_DISCARD;
793 tm_node->red_algo = NIX_REDALG_STD;
795 TAILQ_INSERT_TAIL(&dev->node_list, tm_node, node);
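/*
 * Note (editorial): rr_prio = 0xf and max_prio = UINT32_MAX appear to be
 * "not yet known" defaults; nix_tm_update_parent_info() fills in the real
 * values once the whole hierarchy has been added.
 */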
801 nix_tm_clear_shaper_profiles(struct otx2_eth_dev *dev)
803 struct otx2_nix_tm_shaper_profile *shaper_profile;
805 while ((shaper_profile = TAILQ_FIRST(&dev->shaper_profile_list))) {
806 if (shaper_profile->reference_count)
807 otx2_tm_dbg("Shaper profile %u has non zero references",
808 shaper_profile->shaper_profile_id);
809 TAILQ_REMOVE(&dev->shaper_profile_list, shaper_profile, shaper);
810 rte_free(shaper_profile);
817 nix_clear_path_xoff(struct otx2_eth_dev *dev,
818 struct otx2_nix_tm_node *tm_node)
820 struct nix_txschq_config *req;
821 struct otx2_nix_tm_node *p;
824 /* Manipulating SW_XOFF not supported on Ax */
825 if (otx2_dev_is_Ax(dev))
828 /* Enable nodes in path for flush to succeed */
829 if (!nix_tm_is_leaf(dev, tm_node->lvl))
834 if (!(p->flags & NIX_TM_NODE_ENABLED) &&
835 (p->flags & NIX_TM_NODE_HWRES)) {
836 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
837 req->lvl = p->hw_lvl;
838 req->num_regs = prepare_tm_sw_xoff(p, false, req->reg,
840 rc = otx2_mbox_process(dev->mbox);
844 p->flags |= NIX_TM_NODE_ENABLED;
853 nix_smq_xoff(struct otx2_eth_dev *dev,
854 struct otx2_nix_tm_node *tm_node,
857 struct otx2_mbox *mbox = dev->mbox;
858 struct nix_txschq_config *req;
862 smq = tm_node->hw_id;
863 otx2_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
864 enable ? "enable" : "disable");
866 rc = nix_clear_path_xoff(dev, tm_node);
870 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
871 req->lvl = NIX_TXSCH_LVL_SMQ;
874 req->reg[0] = NIX_AF_SMQX_CFG(smq);
875 req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
876 req->regval_mask[0] = enable ?
877 ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);
879 return otx2_mbox_process(mbox);
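/*
 * Note (editorial): bit 50 of NIX_AF_SMQX_CFG is used as the SMQ XOFF (it
 * is the bit set alone by the "Set xoff" step in populate_tm_reg()) and
 * bit 49 is assumed to be the flush trigger; enabling sets both so pending
 * packets drain, while disabling clears only bit 50, leaving the flush bit
 * untouched via regval_mask.
 */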
883 otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable)
885 struct otx2_eth_txq *txq = __txq;
886 struct npa_aq_enq_req *req;
887 struct npa_aq_enq_rsp *rsp;
888 struct otx2_npa_lf *lf;
889 struct otx2_mbox *mbox;
890 uint64_t aura_handle;
893 otx2_tm_dbg("Setting SQ %u SQB aura FC to %s", txq->sq,
894 enable ? "enable" : "disable");
896 lf = otx2_npa_lf_obj_get();
900 /* Set/clear sqb aura fc_ena */
901 aura_handle = txq->sqb_pool->pool_id;
902 req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
904 req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
905 req->ctype = NPA_AQ_CTYPE_AURA;
906 req->op = NPA_AQ_INSTOP_WRITE;
907 /* Below is not needed for aura writes but AF driver needs it */
908 /* AF will translate to associated poolctx */
909 req->aura.pool_addr = req->aura_id;
911 req->aura.fc_ena = enable;
912 req->aura_mask.fc_ena = 1;
914 rc = otx2_mbox_process(mbox);
918 /* Read back npa aura ctx */
919 req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
921 req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
922 req->ctype = NPA_AQ_CTYPE_AURA;
923 req->op = NPA_AQ_INSTOP_READ;
925 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
929 /* Init when enabled as there might be no triggers */
931 *(volatile uint64_t *)txq->fc_mem = rsp->aura.count;
933 *(volatile uint64_t *)txq->fc_mem = txq->nb_sqb_bufs;
934 /* Sync write barrier */
941 nix_txq_flush_sq_spin(struct otx2_eth_txq *txq)
943 uint16_t sqb_cnt, head_off, tail_off;
944 struct otx2_eth_dev *dev = txq->dev;
945 uint64_t wdata, val, prev;
946 uint16_t sq = txq->sq;
948 uint64_t timeout;/* 10's of usec */
950 /* Wait for enough time based on shaper min rate */
951 timeout = (txq->qconf.nb_desc * NIX_MAX_HW_FRS * 8 * 1E5);
952 timeout = timeout / dev->tm_rate_min;
956 wdata = ((uint64_t)sq << 32);
957 regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS);
958 val = otx2_atomic64_add_nosync(wdata, regaddr);
960 /* Spin multiple iterations as "txq->fc_cache_pkts" can still
961 * have space to send pkts even though fc_mem is disabled
967 val = otx2_atomic64_add_nosync(wdata, regaddr);
968 /* Continue on error */
969 if (val & BIT_ULL(63))
975 sqb_cnt = val & 0xFFFF;
976 head_off = (val >> 20) & 0x3F;
977 tail_off = (val >> 28) & 0x3F;
979 /* SQ reached quiescent state */
980 if (sqb_cnt <= 1 && head_off == tail_off &&
981 (*txq->fc_mem == txq->nb_sqb_bufs)) {
993 otx2_nix_tm_dump(dev);
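/*
 * Note (editorial): the SQ is treated as quiescent once at most one SQB is
 * outstanding, head equals tail and *fc_mem has climbed back to
 * nb_sqb_bufs, i.e. every SQB has been returned to the aura; hitting the
 * timeout instead dumps the TM state for debugging.
 */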
997 /* Flush and disable tx queue and its parent SMQ */
998 int otx2_nix_sq_flush_pre(void *_txq, bool dev_started)
1000 struct otx2_nix_tm_node *tm_node, *sibling;
1001 struct otx2_eth_txq *txq;
1002 struct otx2_eth_dev *dev;
1011 user = !!(dev->tm_flags & NIX_TM_COMMITTED);
1013 /* Find the node for this SQ */
1014 tm_node = nix_tm_node_search(dev, sq, user);
1015 if (!tm_node || !(tm_node->flags & NIX_TM_NODE_ENABLED)) {
1016 otx2_err("Invalid node/state for sq %u", sq);
1020 /* Enable CGX RXTX to drain pkts */
1022 /* Though it enables both RX MCAM Entries and CGX Link
1023 * we assume all the RX queues were already stopped well before this.
1025 otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
1026 rc = otx2_mbox_process(dev->mbox);
1028 otx2_err("cgx start failed, rc=%d", rc);
1033 /* Disable smq xoff for case it was enabled earlier */
1034 rc = nix_smq_xoff(dev, tm_node->parent, false);
1036 otx2_err("Failed to enable smq %u, rc=%d",
1037 tm_node->parent->hw_id, rc);
1041 /* As per HRM, to disable an SQ, all other SQ's
1042 * that feed to same SMQ must be paused before SMQ flush.
1044 TAILQ_FOREACH(sibling, &dev->node_list, node) {
1045 if (sibling->parent != tm_node->parent)
1047 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
1051 txq = dev->eth_dev->data->tx_queues[sq];
1055 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
1057 otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
1061 /* Wait for sq entries to be flushed */
1062 rc = nix_txq_flush_sq_spin(txq);
1064 otx2_err("Failed to drain sq %u, rc=%d", txq->sq, rc);
1069 tm_node->flags &= ~NIX_TM_NODE_ENABLED;
1071 /* Disable and flush */
1072 rc = nix_smq_xoff(dev, tm_node->parent, true);
1074 otx2_err("Failed to disable smq %u, rc=%d",
1075 tm_node->parent->hw_id, rc);
1079 /* Restore cgx state */
1081 otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
1082 rc |= otx2_mbox_process(dev->mbox);
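/*
 * Note (editorial): otx2_nix_sq_flush_pre() pauses every sibling SQ feeding
 * the same SMQ, drains this SQ and then XOFFs/flushes the SMQ, as the HRM
 * comment above requires; otx2_nix_sq_flush_post() below re-enables the
 * siblings and the SMQ once the queue has been reconfigured.
 */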
1088 int otx2_nix_sq_flush_post(void *_txq)
1090 struct otx2_nix_tm_node *tm_node, *sibling;
1091 struct otx2_eth_txq *txq = _txq;
1092 struct otx2_eth_txq *s_txq;
1093 struct otx2_eth_dev *dev;
1101 user = !!(dev->tm_flags & NIX_TM_COMMITTED);
1103 /* Find the node for this SQ */
1104 tm_node = nix_tm_node_search(dev, sq, user);
1106 otx2_err("Invalid node for sq %u", sq);
1110 /* Enable all the siblings back */
1111 TAILQ_FOREACH(sibling, &dev->node_list, node) {
1112 if (sibling->parent != tm_node->parent)
1115 if (sibling->id == sq)
1118 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
1122 s_txq = dev->eth_dev->data->tx_queues[s_sq];
1127 /* Enable back if any SQ is still present */
1128 rc = nix_smq_xoff(dev, tm_node->parent, false);
1130 otx2_err("Failed to enable smq %u, rc=%d",
1131 tm_node->parent->hw_id, rc);
1137 rc = otx2_nix_sq_sqb_aura_fc(s_txq, true);
1139 otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
1148 nix_sq_sched_data(struct otx2_eth_dev *dev,
1149 struct otx2_nix_tm_node *tm_node,
1150 bool rr_quantum_only)
1152 struct rte_eth_dev *eth_dev = dev->eth_dev;
1153 struct otx2_mbox *mbox = dev->mbox;
1154 uint16_t sq = tm_node->id, smq;
1155 struct nix_aq_enq_req *req;
1156 uint64_t rr_quantum;
1159 smq = tm_node->parent->hw_id;
1160 rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
1162 if (rr_quantum_only)
1163 otx2_tm_dbg("Update sq(%u) rr_quantum 0x%"PRIx64, sq, rr_quantum);
1165 otx2_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%"PRIx64,
1166 sq, smq, rr_quantum);
1168 if (sq > eth_dev->data->nb_tx_queues)
1171 req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
1173 req->ctype = NIX_AQ_CTYPE_SQ;
1174 req->op = NIX_AQ_INSTOP_WRITE;
1176 /* smq update only when needed */
1177 if (!rr_quantum_only) {
1179 req->sq_mask.smq = ~req->sq_mask.smq;
1181 req->sq.smq_rr_quantum = rr_quantum;
1182 req->sq_mask.smq_rr_quantum = ~req->sq_mask.smq_rr_quantum;
1184 rc = otx2_mbox_process(mbox);
1186 otx2_err("Failed to set smq, rc=%d", rc);
1190 int otx2_nix_sq_enable(void *_txq)
1192 struct otx2_eth_txq *txq = _txq;
1195 /* Enable sqb_aura fc */
1196 rc = otx2_nix_sq_sqb_aura_fc(txq, true);
1198 otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
1206 nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask,
1207 uint32_t flags, bool hw_only)
1209 struct otx2_nix_tm_shaper_profile *profile;
1210 struct otx2_nix_tm_node *tm_node, *next_node;
1211 struct otx2_mbox *mbox = dev->mbox;
1212 struct nix_txsch_free_req *req;
1213 uint32_t profile_id;
1216 next_node = TAILQ_FIRST(&dev->node_list);
1218 tm_node = next_node;
1219 next_node = TAILQ_NEXT(tm_node, node);
1221 /* Check for only requested nodes */
1222 if ((tm_node->flags & flags_mask) != flags)
1225 if (!nix_tm_is_leaf(dev, tm_node->lvl) &&
1226 tm_node->hw_lvl != NIX_TXSCH_LVL_TL1 &&
1227 tm_node->flags & NIX_TM_NODE_HWRES) {
1228 /* Free specific HW resource */
1229 otx2_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
1230 nix_hwlvl2str(tm_node->hw_lvl),
1231 tm_node->hw_id, tm_node->lvl,
1232 tm_node->id, tm_node);
1234 rc = nix_clear_path_xoff(dev, tm_node);
1238 req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
1240 req->schq_lvl = tm_node->hw_lvl;
1241 req->schq = tm_node->hw_id;
1242 rc = otx2_mbox_process(mbox);
1245 tm_node->flags &= ~NIX_TM_NODE_HWRES;
1248 /* Leave software elements if needed */
1252 otx2_tm_dbg("Free node lvl %u id %u (%p)",
1253 tm_node->lvl, tm_node->id, tm_node);
1255 profile_id = tm_node->params.shaper_profile_id;
1256 profile = nix_tm_shaper_profile_search(dev, profile_id);
1258 profile->reference_count--;
1260 TAILQ_REMOVE(&dev->node_list, tm_node, node);
1265 /* Free all hw resources */
1266 req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
1267 req->flags = TXSCHQ_FREE_ALL;
1269 return otx2_mbox_process(mbox);
1276 nix_tm_copy_rsp_to_dev(struct otx2_eth_dev *dev,
1277 struct nix_txsch_alloc_rsp *rsp)
1282 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1283 for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) {
1284 dev->txschq_list[lvl][schq] = rsp->schq_list[lvl][schq];
1285 dev->txschq_contig_list[lvl][schq] =
1286 rsp->schq_contig_list[lvl][schq];
1289 dev->txschq[lvl] = rsp->schq[lvl];
1290 dev->txschq_contig[lvl] = rsp->schq_contig[lvl];
1296 nix_tm_assign_id_to_node(struct otx2_eth_dev *dev,
1297 struct otx2_nix_tm_node *child,
1298 struct otx2_nix_tm_node *parent)
1300 uint32_t hw_id, schq_con_index, prio_offset;
1301 uint32_t l_id, schq_index;
1303 otx2_tm_dbg("Assign hw id for child node %s lvl %u id %u (%p)",
1304 nix_hwlvl2str(child->hw_lvl), child->lvl, child->id, child);
1306 child->flags |= NIX_TM_NODE_HWRES;
1308 /* Process root nodes */
1309 if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
1310 child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
1312 uint32_t tschq_con_index;
1314 l_id = child->hw_lvl;
1315 tschq_con_index = dev->txschq_contig_index[l_id];
1316 hw_id = dev->txschq_contig_list[l_id][tschq_con_index];
1317 child->hw_id = hw_id;
1318 dev->txschq_contig_index[l_id]++;
1319 /* Update TL1 hw_id for its parent for config purpose */
1320 idx = dev->txschq_index[NIX_TXSCH_LVL_TL1]++;
1321 hw_id = dev->txschq_list[NIX_TXSCH_LVL_TL1][idx];
1322 child->parent_hw_id = hw_id;
1325 if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL1 &&
1326 child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
1327 uint32_t tschq_con_index;
1329 l_id = child->hw_lvl;
1330 tschq_con_index = dev->txschq_index[l_id];
1331 hw_id = dev->txschq_list[l_id][tschq_con_index];
1332 child->hw_id = hw_id;
1333 dev->txschq_index[l_id]++;
1337 /* Process children with parents */
1338 l_id = child->hw_lvl;
1339 schq_index = dev->txschq_index[l_id];
1340 schq_con_index = dev->txschq_contig_index[l_id];
1342 if (child->priority == parent->rr_prio) {
1343 hw_id = dev->txschq_list[l_id][schq_index];
1344 child->hw_id = hw_id;
1345 child->parent_hw_id = parent->hw_id;
1346 dev->txschq_index[l_id]++;
1348 prio_offset = schq_con_index + child->priority;
1349 hw_id = dev->txschq_contig_list[l_id][prio_offset];
1350 child->hw_id = hw_id;
1356 nix_tm_assign_hw_id(struct otx2_eth_dev *dev)
1358 struct otx2_nix_tm_node *parent, *child;
1359 uint32_t child_hw_lvl, con_index_inc, i;
1361 for (i = NIX_TXSCH_LVL_TL1; i > 0; i--) {
1362 TAILQ_FOREACH(parent, &dev->node_list, node) {
1363 child_hw_lvl = parent->hw_lvl - 1;
1364 if (parent->hw_lvl != i)
1366 TAILQ_FOREACH(child, &dev->node_list, node) {
1369 if (child->parent->id != parent->id)
1371 nix_tm_assign_id_to_node(dev, child, parent);
1374 con_index_inc = parent->max_prio + 1;
1375 dev->txschq_contig_index[child_hw_lvl] += con_index_inc;
1378 * Explicitly assign id to parent node if it
1379 * doesn't have a parent
1381 if (parent->hw_lvl == dev->otx2_tm_root_lvl)
1382 nix_tm_assign_id_to_node(dev, parent, NULL);
1389 nix_tm_count_req_schq(struct otx2_eth_dev *dev,
1390 struct nix_txsch_alloc_req *req, uint8_t lvl)
1392 struct otx2_nix_tm_node *tm_node;
1393 uint8_t contig_count;
1395 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1396 if (lvl == tm_node->hw_lvl) {
1397 req->schq[lvl - 1] += tm_node->rr_num;
1398 if (tm_node->max_prio != UINT32_MAX) {
1399 contig_count = tm_node->max_prio + 1;
1400 req->schq_contig[lvl - 1] += contig_count;
1403 if (lvl == dev->otx2_tm_root_lvl &&
1404 dev->otx2_tm_root_lvl && lvl == NIX_TXSCH_LVL_TL2 &&
1405 tm_node->hw_lvl == dev->otx2_tm_root_lvl) {
1406 req->schq_contig[dev->otx2_tm_root_lvl]++;
1410 req->schq[NIX_TXSCH_LVL_TL1] = 1;
1411 req->schq_contig[NIX_TXSCH_LVL_TL1] = 0;
1417 nix_tm_prepare_txschq_req(struct otx2_eth_dev *dev,
1418 struct nix_txsch_alloc_req *req)
1422 for (i = NIX_TXSCH_LVL_TL1; i > 0; i--)
1423 nix_tm_count_req_schq(dev, req, i);
1425 for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
1426 dev->txschq_index[i] = 0;
1427 dev->txschq_contig_index[i] = 0;
1433 nix_tm_send_txsch_alloc_msg(struct otx2_eth_dev *dev)
1435 struct otx2_mbox *mbox = dev->mbox;
1436 struct nix_txsch_alloc_req *req;
1437 struct nix_txsch_alloc_rsp *rsp;
1440 req = otx2_mbox_alloc_msg_nix_txsch_alloc(mbox);
1442 rc = nix_tm_prepare_txschq_req(dev, req);
1446 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1450 nix_tm_copy_rsp_to_dev(dev, rsp);
1451 dev->link_cfg_lvl = rsp->link_cfg_lvl;
1453 nix_tm_assign_hw_id(dev);
1458 nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)
1460 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1461 struct otx2_nix_tm_node *tm_node;
1462 struct otx2_eth_txq *txq;
1466 nix_tm_update_parent_info(dev);
1468 rc = nix_tm_send_txsch_alloc_msg(dev);
1470 otx2_err("TM failed to alloc tm resources=%d", rc);
1474 rc = nix_tm_txsch_reg_config(dev);
1476 otx2_err("TM failed to configure sched registers=%d", rc);
1480 /* Trigger MTU recalculate as SMQ needs MTU conf */
1481 if (eth_dev->data->dev_started && eth_dev->data->nb_rx_queues) {
1482 rc = otx2_nix_recalc_mtu(eth_dev);
1484 otx2_err("TM MTU update failed, rc=%d", rc);
1489 /* Mark all non-leaf's as enabled */
1490 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1491 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1492 tm_node->flags |= NIX_TM_NODE_ENABLED;
1498 /* Update SQ Sched Data while SQ is idle */
1499 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1500 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1503 rc = nix_sq_sched_data(dev, tm_node, false);
1505 otx2_err("SQ %u sched update failed, rc=%d",
1511 /* Finally XON all SMQ's */
1512 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1513 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1516 rc = nix_smq_xoff(dev, tm_node, false);
1518 otx2_err("Failed to enable smq %u, rc=%d",
1519 tm_node->hw_id, rc);
1524 /* Enable xmit as all the topology is ready */
1525 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1526 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1530 txq = eth_dev->data->tx_queues[sq];
1532 rc = otx2_nix_sq_enable(txq);
1534 otx2_err("TM sw xon failed on SQ %u, rc=%d",
1538 tm_node->flags |= NIX_TM_NODE_ENABLED;
1545 send_tm_reqval(struct otx2_mbox *mbox,
1546 struct nix_txschq_config *req,
1547 struct rte_tm_error *error)
1551 if (!req->num_regs ||
1552 req->num_regs > MAX_REGS_PER_MBOX_MSG) {
1553 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1554 error->message = "invalid config";
1558 rc = otx2_mbox_process(mbox);
1560 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1561 error->message = "unexpected fatal error";
1567 nix_tm_lvl2nix(struct otx2_eth_dev *dev, uint32_t lvl)
1569 if (nix_tm_have_tl1_access(dev)) {
1571 case OTX2_TM_LVL_ROOT:
1572 return NIX_TXSCH_LVL_TL1;
1573 case OTX2_TM_LVL_SCH1:
1574 return NIX_TXSCH_LVL_TL2;
1575 case OTX2_TM_LVL_SCH2:
1576 return NIX_TXSCH_LVL_TL3;
1577 case OTX2_TM_LVL_SCH3:
1578 return NIX_TXSCH_LVL_TL4;
1579 case OTX2_TM_LVL_SCH4:
1580 return NIX_TXSCH_LVL_SMQ;
1582 return NIX_TXSCH_LVL_CNT;
1586 case OTX2_TM_LVL_ROOT:
1587 return NIX_TXSCH_LVL_TL2;
1588 case OTX2_TM_LVL_SCH1:
1589 return NIX_TXSCH_LVL_TL3;
1590 case OTX2_TM_LVL_SCH2:
1591 return NIX_TXSCH_LVL_TL4;
1592 case OTX2_TM_LVL_SCH3:
1593 return NIX_TXSCH_LVL_SMQ;
1595 return NIX_TXSCH_LVL_CNT;
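/*
 * Note (editorial): with TL1 access the rte_tm levels ROOT..SCH4 map to
 * TL1..SMQ; without it (VFs, Ax silicon and LBK) ROOT starts at TL2, SCH3
 * maps to SMQ and SCH4 becomes the leaf level (see nix_tm_is_leaf()).
 */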
1601 nix_max_prio(struct otx2_eth_dev *dev, uint16_t hw_lvl)
1603 if (hw_lvl >= NIX_TXSCH_LVL_CNT)
1606 /* MDQ doesn't support SP */
1607 if (hw_lvl == NIX_TXSCH_LVL_MDQ)
1610 /* PF's TL1 with VF's enabled doesn't support SP */
1611 if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
1612 (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
1613 (dev->tm_flags & NIX_TM_TL1_NO_SP)))
1616 return TXSCH_TLX_SP_PRIO_MAX - 1;
1621 validate_prio(struct otx2_eth_dev *dev, uint32_t lvl,
1622 uint32_t parent_id, uint32_t priority,
1623 struct rte_tm_error *error)
1625 uint8_t priorities[TXSCH_TLX_SP_PRIO_MAX];
1626 struct otx2_nix_tm_node *tm_node;
1627 uint32_t rr_num = 0;
1630 /* Validate priority against max */
1631 if (priority > nix_max_prio(dev, nix_tm_lvl2nix(dev, lvl - 1))) {
1632 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
1633 error->message = "unsupported priority value";
1637 if (parent_id == RTE_TM_NODE_ID_NULL)
1640 memset(priorities, 0, TXSCH_TLX_SP_PRIO_MAX);
1641 priorities[priority] = 1;
1643 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1644 if (!tm_node->parent)
1647 if (!(tm_node->flags & NIX_TM_NODE_USER))
1650 if (tm_node->parent->id != parent_id)
1653 priorities[tm_node->priority]++;
1656 for (i = 0; i < TXSCH_TLX_SP_PRIO_MAX; i++)
1657 if (priorities[i] > 1)
1660 /* At most one RR group per parent */
1662 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
1663 error->message = "multiple DWRR node priority";
1667 /* Check for previous priority to avoid holes in priorities */
1668 if (priority && !priorities[priority - 1]) {
1669 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
1670 error->message = "priority not in order";
1678 read_tm_reg(struct otx2_mbox *mbox, uint64_t reg,
1679 uint64_t *regval, uint32_t hw_lvl)
1681 volatile struct nix_txschq_config *req;
1682 struct nix_txschq_config *rsp;
1685 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
1691 rc = otx2_mbox_process_msg(mbox, (void **)&rsp);
1694 *regval = rsp->regval[0];
1698 /* Search for min rate in topology */
1700 nix_tm_shaper_profile_update_min(struct otx2_eth_dev *dev)
1702 struct otx2_nix_tm_shaper_profile *profile;
1703 uint64_t rate_min = 1E9; /* 1 Gbps */
1705 TAILQ_FOREACH(profile, &dev->shaper_profile_list, shaper) {
1706 if (profile->params.peak.rate &&
1707 profile->params.peak.rate < rate_min)
1708 rate_min = profile->params.peak.rate;
1710 if (profile->params.committed.rate &&
1711 profile->params.committed.rate < rate_min)
1712 rate_min = profile->params.committed.rate;
1715 dev->tm_rate_min = rate_min;
1719 nix_xmit_disable(struct rte_eth_dev *eth_dev)
1721 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1722 uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
1723 uint16_t sqb_cnt, head_off, tail_off;
1724 struct otx2_nix_tm_node *tm_node;
1725 struct otx2_eth_txq *txq;
1726 uint64_t wdata, val;
1729 otx2_tm_dbg("Disabling xmit on %s", eth_dev->data->name);
1731 /* Enable CGX RXTX to drain pkts */
1732 if (!eth_dev->data->dev_started) {
1733 otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
1734 rc = otx2_mbox_process(dev->mbox);
1740 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1741 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1743 if (!(tm_node->flags & NIX_TM_NODE_HWRES))
1746 rc = nix_smq_xoff(dev, tm_node, false);
1748 otx2_err("Failed to enable smq %u, rc=%d",
1749 tm_node->hw_id, rc);
1754 /* Flush all tx queues */
1755 for (i = 0; i < sq_cnt; i++) {
1756 txq = eth_dev->data->tx_queues[i];
1758 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
1760 otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
1764 /* Wait for sq entries to be flushed */
1765 rc = nix_txq_flush_sq_spin(txq);
1767 otx2_err("Failed to drain sq, rc=%d", rc);
1772 /* XOFF & Flush all SMQ's. HRM mandates
1773 * all SQ's empty before SMQ flush is issued.
1775 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1776 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1778 if (!(tm_node->flags & NIX_TM_NODE_HWRES))
1781 rc = nix_smq_xoff(dev, tm_node, true);
1783 otx2_err("Failed to disable smq %u, rc=%d",
1784 tm_node->hw_id, rc);
1789 /* Verify sanity of all tx queues */
1790 for (i = 0; i < sq_cnt; i++) {
1791 txq = eth_dev->data->tx_queues[i];
1793 wdata = ((uint64_t)txq->sq << 32);
1794 val = otx2_atomic64_add_nosync(wdata,
1795 (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS));
1797 sqb_cnt = val & 0xFFFF;
1798 head_off = (val >> 20) & 0x3F;
1799 tail_off = (val >> 28) & 0x3F;
1801 if (sqb_cnt > 1 || head_off != tail_off ||
1802 (*txq->fc_mem != txq->nb_sqb_bufs))
1803 otx2_err("Failed to gracefully flush sq %u", txq->sq);
1807 /* restore cgx state */
1808 if (!eth_dev->data->dev_started) {
1809 otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
1810 rc |= otx2_mbox_process(dev->mbox);
1817 otx2_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
1818 int *is_leaf, struct rte_tm_error *error)
1820 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1821 struct otx2_nix_tm_node *tm_node;
1823 if (is_leaf == NULL) {
1824 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1828 tm_node = nix_tm_node_search(dev, node_id, true);
1829 if (node_id == RTE_TM_NODE_ID_NULL || !tm_node) {
1830 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
1833 if (nix_tm_is_leaf(dev, tm_node->lvl))
1841 otx2_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
1842 struct rte_tm_capabilities *cap,
1843 struct rte_tm_error *error)
1845 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1846 struct otx2_mbox *mbox = dev->mbox;
1847 int rc, max_nr_nodes = 0, i;
1848 struct free_rsrcs_rsp *rsp;
1850 memset(cap, 0, sizeof(*cap));
1852 otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
1853 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1855 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1856 error->message = "unexpected fatal error";
1860 for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
1861 max_nr_nodes += rsp->schq[i];
1863 cap->n_nodes_max = max_nr_nodes + dev->tm_leaf_cnt;
1864 /* TL1 level is reserved for PF */
1865 cap->n_levels_max = nix_tm_have_tl1_access(dev) ?
1866 OTX2_TM_LVL_MAX : OTX2_TM_LVL_MAX - 1;
1867 cap->non_leaf_nodes_identical = 1;
1868 cap->leaf_nodes_identical = 1;
1870 /* Shaper Capabilities */
1871 cap->shaper_private_n_max = max_nr_nodes;
1872 cap->shaper_n_max = max_nr_nodes;
1873 cap->shaper_private_dual_rate_n_max = max_nr_nodes;
1874 cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1875 cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1876 cap->shaper_pkt_length_adjust_min = 0;
1877 cap->shaper_pkt_length_adjust_max = 0;
1879 /* Schedule Capabilities */
1880 cap->sched_n_children_max = rsp->schq[NIX_TXSCH_LVL_MDQ];
1881 cap->sched_sp_n_priorities_max = TXSCH_TLX_SP_PRIO_MAX;
1882 cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
1883 cap->sched_wfq_n_groups_max = 1;
1884 cap->sched_wfq_weight_max = MAX_SCHED_WEIGHT;
1886 cap->dynamic_update_mask =
1887 RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
1888 RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
1890 RTE_TM_STATS_N_PKTS |
1891 RTE_TM_STATS_N_BYTES |
1892 RTE_TM_STATS_N_PKTS_RED_DROPPED |
1893 RTE_TM_STATS_N_BYTES_RED_DROPPED;
1895 for (i = 0; i < RTE_COLORS; i++) {
1896 cap->mark_vlan_dei_supported[i] = false;
1897 cap->mark_ip_ecn_tcp_supported[i] = false;
1898 cap->mark_ip_dscp_supported[i] = false;
1905 otx2_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
1906 struct rte_tm_level_capabilities *cap,
1907 struct rte_tm_error *error)
1909 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1910 struct otx2_mbox *mbox = dev->mbox;
1911 struct free_rsrcs_rsp *rsp;
1915 memset(cap, 0, sizeof(*cap));
1917 otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
1918 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1920 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1921 error->message = "unexpected fatal error";
1925 hw_lvl = nix_tm_lvl2nix(dev, lvl);
1927 if (nix_tm_is_leaf(dev, lvl)) {
1929 cap->n_nodes_max = dev->tm_leaf_cnt;
1930 cap->n_nodes_leaf_max = dev->tm_leaf_cnt;
1931 cap->leaf_nodes_identical = 1;
1932 cap->leaf.stats_mask =
1933 RTE_TM_STATS_N_PKTS |
1934 RTE_TM_STATS_N_BYTES;
1936 } else if (lvl == OTX2_TM_LVL_ROOT) {
1937 /* Root node, aka TL2(vf)/TL1(pf) */
1938 cap->n_nodes_max = 1;
1939 cap->n_nodes_nonleaf_max = 1;
1940 cap->non_leaf_nodes_identical = 1;
1942 cap->nonleaf.shaper_private_supported = true;
1943 cap->nonleaf.shaper_private_dual_rate_supported =
1944 nix_tm_have_tl1_access(dev) ? false : true;
1945 cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1946 cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1948 cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
1949 cap->nonleaf.sched_sp_n_priorities_max =
1950 nix_max_prio(dev, hw_lvl) + 1;
1951 cap->nonleaf.sched_wfq_n_groups_max = 1;
1952 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
1954 if (nix_tm_have_tl1_access(dev))
1955 cap->nonleaf.stats_mask =
1956 RTE_TM_STATS_N_PKTS_RED_DROPPED |
1957 RTE_TM_STATS_N_BYTES_RED_DROPPED;
1958 } else if ((lvl < OTX2_TM_LVL_MAX) &&
1959 (hw_lvl < NIX_TXSCH_LVL_CNT)) {
1960 /* TL2, TL3, TL4, MDQ */
1961 cap->n_nodes_max = rsp->schq[hw_lvl];
1962 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
1963 cap->non_leaf_nodes_identical = 1;
1965 cap->nonleaf.shaper_private_supported = true;
1966 cap->nonleaf.shaper_private_dual_rate_supported = true;
1967 cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1968 cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1970 /* MDQ doesn't support Strict Priority */
1971 if (hw_lvl == NIX_TXSCH_LVL_MDQ)
1972 cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
1974 cap->nonleaf.sched_n_children_max =
1975 rsp->schq[hw_lvl - 1];
1976 cap->nonleaf.sched_sp_n_priorities_max =
1977 nix_max_prio(dev, hw_lvl) + 1;
1978 cap->nonleaf.sched_wfq_n_groups_max = 1;
1979 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
1981 /* unsupported level */
1982 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1989 otx2_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
1990 struct rte_tm_node_capabilities *cap,
1991 struct rte_tm_error *error)
1993 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1994 struct otx2_mbox *mbox = dev->mbox;
1995 struct otx2_nix_tm_node *tm_node;
1996 struct free_rsrcs_rsp *rsp;
1997 int rc, hw_lvl, lvl;
1999 memset(cap, 0, sizeof(*cap));
2001 tm_node = nix_tm_node_search(dev, node_id, true);
2003 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2004 error->message = "no such node";
2008 hw_lvl = tm_node->hw_lvl;
2012 if (nix_tm_is_leaf(dev, lvl)) {
2013 cap->stats_mask = RTE_TM_STATS_N_PKTS |
2014 RTE_TM_STATS_N_BYTES;
2018 otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
2019 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
2021 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2022 error->message = "unexpected fatal error";
2026 /* Non Leaf Shaper */
2027 cap->shaper_private_supported = true;
2028 cap->shaper_private_dual_rate_supported =
2029 (hw_lvl == NIX_TXSCH_LVL_TL1) ? false : true;
2030 cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
2031 cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
2033 /* Non Leaf Scheduler */
2034 if (hw_lvl == NIX_TXSCH_LVL_MDQ)
2035 cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
2037 cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
2039 cap->nonleaf.sched_sp_n_priorities_max = nix_max_prio(dev, hw_lvl) + 1;
2040 cap->nonleaf.sched_wfq_n_children_per_group_max =
2041 cap->nonleaf.sched_n_children_max;
2042 cap->nonleaf.sched_wfq_n_groups_max = 1;
2043 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
2045 if (hw_lvl == NIX_TXSCH_LVL_TL1)
2046 cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
2047 RTE_TM_STATS_N_BYTES_RED_DROPPED;
2052 otx2_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev,
2053 uint32_t profile_id,
2054 struct rte_tm_shaper_params *params,
2055 struct rte_tm_error *error)
2057 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2058 struct otx2_nix_tm_shaper_profile *profile;
2060 profile = nix_tm_shaper_profile_search(dev, profile_id);
2062 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2063 error->message = "shaper profile ID already exists";
2067 /* Committed rate and burst size can be enabled/disabled */
2068 if (params->committed.size || params->committed.rate) {
2069 if (params->committed.size < MIN_SHAPER_BURST ||
2070 params->committed.size > MAX_SHAPER_BURST) {
2072 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
2074 } else if (!shaper_rate_to_nix(params->committed.rate * 8,
2075 NULL, NULL, NULL)) {
2077 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
2078 error->message = "shaper committed rate invalid";
2083 /* Peak rate and burst size can be enabled/disabled */
2084 if (params->peak.size || params->peak.rate) {
2085 if (params->peak.size < MIN_SHAPER_BURST ||
2086 params->peak.size > MAX_SHAPER_BURST) {
2088 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
2090 } else if (!shaper_rate_to_nix(params->peak.rate * 8,
2091 NULL, NULL, NULL)) {
2093 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
2094 error->message = "shaper peak rate invalid";
2099 profile = rte_zmalloc("otx2_nix_tm_shaper_profile",
2100 sizeof(struct otx2_nix_tm_shaper_profile), 0);
2104 profile->shaper_profile_id = profile_id;
2105 rte_memcpy(&profile->params, params,
2106 sizeof(struct rte_tm_shaper_params));
2107 TAILQ_INSERT_TAIL(&dev->shaper_profile_list, profile, shaper);
2109 otx2_tm_dbg("Added TM shaper profile %u, "
2110 " pir %" PRIu64 " , pbs %" PRIu64 ", cir %" PRIu64
2111 ", cbs %" PRIu64 " , adj %u",
2113 params->peak.rate * 8,
2115 params->committed.rate * 8,
2116 params->committed.size,
2117 params->pkt_length_adjust);
2119 /* Translate rates to bits per second */
2120 profile->params.peak.rate = profile->params.peak.rate * 8;
2121 profile->params.committed.rate = profile->params.committed.rate * 8;
2122 /* Always use PIR for single rate shaping */
2123 if (!params->peak.rate && params->committed.rate) {
2124 profile->params.peak = profile->params.committed;
2125 memset(&profile->params.committed, 0,
2126 sizeof(profile->params.committed));
2129 /* update min rate */
2130 nix_tm_shaper_profile_update_min(dev);
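/*
 * Note (editorial): rte_tm expresses rates in bytes/sec while the shaper
 * math above works in bits/sec, hence the "* 8" here and the "/ 8" when
 * capabilities are reported; tm_rate_min additionally bounds the SQ flush
 * timeout in nix_txq_flush_sq_spin().
 */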
2135 otx2_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
2136 uint32_t profile_id,
2137 struct rte_tm_error *error)
2139 struct otx2_nix_tm_shaper_profile *profile;
2140 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2142 profile = nix_tm_shaper_profile_search(dev, profile_id);
2145 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2146 error->message = "shaper profile ID does not exist";
2150 if (profile->reference_count) {
2151 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
2152 error->message = "shaper profile in use";
2156 otx2_tm_dbg("Removing TM shaper profile %u", profile_id);
2157 TAILQ_REMOVE(&dev->shaper_profile_list, profile, shaper);
2160 /* update min rate */
2161 nix_tm_shaper_profile_update_min(dev);
2166 otx2_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
2167 uint32_t parent_node_id, uint32_t priority,
2168 uint32_t weight, uint32_t lvl,
2169 struct rte_tm_node_params *params,
2170 struct rte_tm_error *error)
2172 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2173 struct otx2_nix_tm_node *parent_node;
2174 int rc, clear_on_fail = 0;
2175 uint32_t exp_next_lvl;
2178 /* we don't support dynamic updates */
2179 if (dev->tm_flags & NIX_TM_COMMITTED) {
2180 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2181 error->message = "dynamic update not supported";
2185 /* Leaf nodes have to be same priority */
2186 if (nix_tm_is_leaf(dev, lvl) && priority != 0) {
2187 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2188 error->message = "queue shapers must be priority 0";
2192 parent_node = nix_tm_node_search(dev, parent_node_id, true);
2194 /* find the right level */
2195 if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
2196 if (parent_node_id == RTE_TM_NODE_ID_NULL) {
2197 lvl = OTX2_TM_LVL_ROOT;
2198 } else if (parent_node) {
2199 lvl = parent_node->lvl + 1;
2201 /* Neither a proper parent nor a proper level id given */
2202 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2203 error->message = "invalid parent node id";
2208 /* Translate rte_tm level id's to nix hw level id's */
2209 hw_lvl = nix_tm_lvl2nix(dev, lvl);
2210 if (hw_lvl == NIX_TXSCH_LVL_CNT &&
2211 !nix_tm_is_leaf(dev, lvl)) {
2212 error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
2213 error->message = "invalid level id";
2217 if (node_id < dev->tm_leaf_cnt)
2218 exp_next_lvl = NIX_TXSCH_LVL_SMQ;
2220 exp_next_lvl = hw_lvl + 1;
2222 /* Check if there is no parent node yet */
2223 if (hw_lvl != dev->otx2_tm_root_lvl &&
2224 (!parent_node || parent_node->hw_lvl != exp_next_lvl)) {
2225 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2226 error->message = "invalid parent node id";
2230 /* Check if a node already exists */
2231 if (nix_tm_node_search(dev, node_id, true)) {
2232 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2233 error->message = "node already exists";
2237 /* Check if shaper profile exists for non leaf node */
2238 if (!nix_tm_is_leaf(dev, lvl) &&
2239 params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE &&
2240 !nix_tm_shaper_profile_search(dev, params->shaper_profile_id)) {
2241 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2242 error->message = "invalid shaper profile";
2246 /* Check if there is second DWRR already in siblings or holes in prio */
2247 if (validate_prio(dev, lvl, parent_node_id, priority, error))
2250 if (weight > MAX_SCHED_WEIGHT) {
2251 error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
2252 error->message = "max weight exceeded";
2256 rc = nix_tm_node_add_to_list(dev, node_id, parent_node_id,
2257 priority, weight, hw_lvl,
2260 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2261 /* cleanup user added nodes */
2263 nix_tm_free_resources(dev, NIX_TM_NODE_USER,
2264 NIX_TM_NODE_USER, false);
2265 error->message = "failed to add node";
2268 error->type = RTE_TM_ERROR_TYPE_NONE;
2273 otx2_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id,
2274 struct rte_tm_error *error)
2276 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2277 struct otx2_nix_tm_node *tm_node, *child_node;
2278 struct otx2_nix_tm_shaper_profile *profile;
2279 uint32_t profile_id;
2281 /* we don't support dynamic updates yet */
2282 if (dev->tm_flags & NIX_TM_COMMITTED) {
2283 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2284 error->message = "hierarchy exists";
2288 if (node_id == RTE_TM_NODE_ID_NULL) {
2289 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2290 error->message = "invalid node id";
2294 tm_node = nix_tm_node_search(dev, node_id, true);
2296 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2297 error->message = "no such node";
2301 /* Check for any existing children */
2302 TAILQ_FOREACH(child_node, &dev->node_list, node) {
2303 if (child_node->parent == tm_node) {
2304 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2305 error->message = "children exist";
2310 /* Remove shaper profile reference */
2311 profile_id = tm_node->params.shaper_profile_id;
2312 profile = nix_tm_shaper_profile_search(dev, profile_id);
2313 profile->reference_count--;
2315 TAILQ_REMOVE(&dev->node_list, tm_node, node);
2321 nix_tm_node_suspend_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
2322 struct rte_tm_error *error, bool suspend)
2324 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2325 struct otx2_mbox *mbox = dev->mbox;
2326 struct otx2_nix_tm_node *tm_node;
2327 struct nix_txschq_config *req;
2331 tm_node = nix_tm_node_search(dev, node_id, true);
2333 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2334 error->message = "no such node";
2338 if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
2339 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2340 error->message = "hierarchy doesn't exist";
2344 flags = tm_node->flags;
2345 flags = suspend ? (flags & ~NIX_TM_NODE_ENABLED) :
2346 (flags | NIX_TM_NODE_ENABLED);
2348 if (tm_node->flags == flags)
2351 /* send mbox for state change */
2352 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2354 req->lvl = tm_node->hw_lvl;
2355 req->num_regs = prepare_tm_sw_xoff(tm_node, suspend,
2356 req->reg, req->regval);
2357 rc = send_tm_reqval(mbox, req, error);
2359 tm_node->flags = flags;
2364 otx2_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id,
2365 struct rte_tm_error *error)
2367 return nix_tm_node_suspend_resume(eth_dev, node_id, error, true);
2371 otx2_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
2372 struct rte_tm_error *error)
2374 return nix_tm_node_suspend_resume(eth_dev, node_id, error, false);
2378 otx2_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
2380 struct rte_tm_error *error)
2382 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2383 struct otx2_nix_tm_node *tm_node;
2384 uint32_t leaf_cnt = 0;
2387 if (dev->tm_flags & NIX_TM_COMMITTED) {
2388 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2389 error->message = "hierarchy exists";
2393 /* Check if we have all the leaf nodes */
2394 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
2395 if (tm_node->flags & NIX_TM_NODE_USER &&
2396 tm_node->id < dev->tm_leaf_cnt)
2400 if (leaf_cnt != dev->tm_leaf_cnt) {
2401 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2402 error->message = "incomplete hierarchy";
2407 * Disable xmit; it will be re-enabled once the
2408 * new topology is in place.
2410 rc = nix_xmit_disable(eth_dev);
2412 otx2_err("failed to disable TX, rc=%d", rc);
2416 /* Delete default/ratelimit tree */
2417 if (dev->tm_flags & (NIX_TM_DEFAULT_TREE | NIX_TM_RATE_LIMIT_TREE)) {
2418 rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false);
2420 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2421 error->message = "failed to free default resources";
2424 dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE |
2425 NIX_TM_RATE_LIMIT_TREE);
2428 /* Free up user alloc'ed resources */
2429 rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER,
2430 NIX_TM_NODE_USER, true);
2432 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2433 error->message = "failed to free user resources";
2437 rc = nix_tm_alloc_resources(eth_dev, true);
2439 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2440 error->message = "alloc resources failed";
2441 /* TODO should we restore default config ? */
2443 nix_tm_free_resources(dev, 0, 0, false);
2447 error->type = RTE_TM_ERROR_TYPE_NONE;
2448 dev->tm_flags |= NIX_TM_COMMITTED;
2453 otx2_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev,
2455 uint32_t profile_id,
2456 struct rte_tm_error *error)
2458 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2459 struct otx2_nix_tm_shaper_profile *profile = NULL;
2460 struct otx2_mbox *mbox = dev->mbox;
2461 struct otx2_nix_tm_node *tm_node;
2462 struct nix_txschq_config *req;
2466 tm_node = nix_tm_node_search(dev, node_id, true);
2467 if (!tm_node || nix_tm_is_leaf(dev, tm_node->lvl)) {
2468 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2469 error->message = "invalid node";
2473 if (profile_id == tm_node->params.shaper_profile_id)
2476 if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
2477 profile = nix_tm_shaper_profile_search(dev, profile_id);
2479 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2480 error->message = "shaper profile ID does not exist";
2485 tm_node->params.shaper_profile_id = profile_id;
2487 /* Nothing to do if not yet committed */
2488 if (!(dev->tm_flags & NIX_TM_COMMITTED))
2491 tm_node->flags &= ~NIX_TM_NODE_ENABLED;
2493 /* Flush the specific node with SW_XOFF */
2494 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2495 req->lvl = tm_node->hw_lvl;
2496 k = prepare_tm_sw_xoff(tm_node, true, req->reg, req->regval);
2499 rc = send_tm_reqval(mbox, req, error);
2503 /* Update the PIR/CIR and clear SW XOFF */
2504 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2505 req->lvl = tm_node->hw_lvl;
2507 k = prepare_tm_shaper_reg(tm_node, profile, req->reg, req->regval);
2509 k += prepare_tm_sw_xoff(tm_node, false, &req->reg[k], &req->regval[k]);
2512 rc = send_tm_reqval(mbox, req, error);
2514 tm_node->flags |= NIX_TM_NODE_ENABLED;
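/*
 * Parent/weight update: only a weight change under the same parent and
 * priority is supported. Leaf nodes just get their SQ scheduling data
 * (quantum) rewritten on the fly; scheduler nodes require XOFF'ing the
 * parent and then the node and its siblings, rewriting the scheduling
 * register with the new weight, and XON'ing everything again in
 * reverse order.
 */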
2519 otx2_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev,
2520 uint32_t node_id, uint32_t new_parent_id,
2521 uint32_t priority, uint32_t weight,
2522 struct rte_tm_error *error)
2524 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2525 struct otx2_nix_tm_node *tm_node, *sibling;
2526 struct otx2_nix_tm_node *new_parent;
2527 struct nix_txschq_config *req;
2531 if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
2532 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2533 error->message = "hierarchy doesn't exist";
2537 tm_node = nix_tm_node_search(dev, node_id, true);
2539 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2540 error->message = "no such node";
2544 /* Parent id valid only for non root nodes */
2545 if (tm_node->hw_lvl != dev->otx2_tm_root_lvl) {
2546 new_parent = nix_tm_node_search(dev, new_parent_id, true);
2548 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2549 error->message = "no such parent node";
2553 /* Current support is only for dynamic weight update */
2554 if (tm_node->parent != new_parent ||
2555 tm_node->priority != priority) {
2556 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2557 error->message = "only weight update supported";
2562 /* Skip if no change */
2563 if (tm_node->weight == weight)
2566 tm_node->weight = weight;
2568 /* For leaf nodes, SQ CTX needs update */
2569 if (nix_tm_is_leaf(dev, tm_node->lvl)) {
2570 /* Update SQ quantum data on the fly */
2571 rc = nix_sq_sched_data(dev, tm_node, true);
2573 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2574 error->message = "sq sched data update failed";
2578 /* XOFF Parent node */
2579 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2580 req->lvl = tm_node->parent->hw_lvl;
2581 req->num_regs = prepare_tm_sw_xoff(tm_node->parent, true,
2582 req->reg, req->regval);
2583 rc = send_tm_reqval(dev->mbox, req, error);
2587 /* XOFF this node and all other siblings */
2588 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2589 req->lvl = tm_node->hw_lvl;
2592 TAILQ_FOREACH(sibling, &dev->node_list, node) {
2593 if (sibling->parent != tm_node->parent)
2595 k += prepare_tm_sw_xoff(sibling, true, &req->reg[k],
2599 rc = send_tm_reqval(dev->mbox, req, error);
2603 /* Update new weight for current node */
2604 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2605 req->lvl = tm_node->hw_lvl;
2606 req->num_regs = prepare_tm_sched_reg(dev, tm_node,
2607 req->reg, req->regval);
2608 rc = send_tm_reqval(dev->mbox, req, error);
2612 /* XON this node and all other siblings */
2613 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2614 req->lvl = tm_node->hw_lvl;
2617 TAILQ_FOREACH(sibling, &dev->node_list, node) {
2618 if (sibling->parent != tm_node->parent)
2620 k += prepare_tm_sw_xoff(sibling, false, &req->reg[k],
2624 rc = send_tm_reqval(dev->mbox, req, error);
2628 /* XON Parent node */
2629 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2630 req->lvl = tm_node->parent->hw_lvl;
2631 req->num_regs = prepare_tm_sw_xoff(tm_node->parent, false,
2632 req->reg, req->regval);
2633 rc = send_tm_reqval(dev->mbox, req, error);
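/*
 * Stats: leaf (SQ) counters are read with atomic ops on the
 * NIX_LF_SQ_OP_* registers, with the SQ index carried in the upper
 * 32 bits of the operand; the TL1 root instead reads the RED-drop
 * counters through AF registers over the mailbox. In both cases the
 * driver keeps a last-read snapshot and reports deltas.
 */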
2641 otx2_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
2642 struct rte_tm_node_stats *stats,
2643 uint64_t *stats_mask, int clear,
2644 struct rte_tm_error *error)
2646 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2647 struct otx2_nix_tm_node *tm_node;
2652 tm_node = nix_tm_node_search(dev, node_id, true);
2654 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2655 error->message = "no such node";
2659 /* Stats are supported only for leaf nodes and the TL1 root */
2660 if (nix_tm_is_leaf(dev, tm_node->lvl)) {
2661 reg = (((uint64_t)tm_node->id) << 32);
2664 addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);
2665 val = otx2_atomic64_add_nosync(reg, addr);
2668 stats->n_pkts = val - tm_node->last_pkts;
2671 addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_OCTS);
2672 val = otx2_atomic64_add_nosync(reg, addr);
2675 stats->n_bytes = val - tm_node->last_bytes;
2678 tm_node->last_pkts = stats->n_pkts;
2679 tm_node->last_bytes = stats->n_bytes;
2682 *stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
2684 } else if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1) {
2685 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2686 error->message = "stats read error";
2688 /* RED Drop packets */
2689 reg = NIX_AF_TL1X_DROPPED_PACKETS(tm_node->hw_id);
2690 rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
2693 stats->leaf.n_pkts_dropped[RTE_COLOR_RED] =
2694 val - tm_node->last_pkts;
2696 /* RED Drop bytes */
2697 reg = NIX_AF_TL1X_DROPPED_BYTES(tm_node->hw_id);
2698 rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
2701 stats->leaf.n_bytes_dropped[RTE_COLOR_RED] =
2702 val - tm_node->last_bytes;
2706 tm_node->last_pkts =
2707 stats->leaf.n_pkts_dropped[RTE_COLOR_RED];
2708 tm_node->last_bytes =
2709 stats->leaf.n_bytes_dropped[RTE_COLOR_RED];
2712 *stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
2713 RTE_TM_STATS_N_BYTES_RED_DROPPED;
2716 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2717 error->message = "unsupported node";
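/*
 * The ops table below is handed out by otx2_nix_tm_ops_get(); callers
 * reach these callbacks through the generic rte_tm API, e.g. (sketch):
 *
 *	struct rte_tm_error err;
 *	rte_tm_hierarchy_commit(port_id, 1, &err);
 *
 * which lands in otx2_nix_tm_hierarchy_commit() above.
 */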
2725 const struct rte_tm_ops otx2_tm_ops = {
2726 .node_type_get = otx2_nix_tm_node_type_get,
2728 .capabilities_get = otx2_nix_tm_capa_get,
2729 .level_capabilities_get = otx2_nix_tm_level_capa_get,
2730 .node_capabilities_get = otx2_nix_tm_node_capa_get,
2732 .shaper_profile_add = otx2_nix_tm_shaper_profile_add,
2733 .shaper_profile_delete = otx2_nix_tm_shaper_profile_delete,
2735 .node_add = otx2_nix_tm_node_add,
2736 .node_delete = otx2_nix_tm_node_delete,
2737 .node_suspend = otx2_nix_tm_node_suspend,
2738 .node_resume = otx2_nix_tm_node_resume,
2739 .hierarchy_commit = otx2_nix_tm_hierarchy_commit,
2741 .node_shaper_update = otx2_nix_tm_node_shaper_update,
2742 .node_parent_update = otx2_nix_tm_node_parent_update,
2743 .node_stats_read = otx2_nix_tm_node_stats_read,
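/*
 * Default tree built below: one chain of scheduler nodes shared by all
 * SQs, rooted at TL1 when this PF has TL1 access and at TL2 otherwise,
 * with every Tx queue attached as a leaf under the lowest scheduler
 * level. Non-leaf node ids start at nb_tx_queues so they never collide
 * with the 0..nb_tx_queues-1 ids used for the leaf (SQ) nodes.
 */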
2747 nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)
2749 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2750 uint32_t def = eth_dev->data->nb_tx_queues;
2751 struct rte_tm_node_params params;
2752 uint32_t leaf_parent, i;
2753 int rc = 0, leaf_level;
2755 /* Default params */
2756 memset(&params, 0, sizeof(params));
2757 params.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
2759 if (nix_tm_have_tl1_access(dev)) {
2760 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
2761 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2764 OTX2_TM_LVL_ROOT, false, &params);
2767 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2770 OTX2_TM_LVL_SCH1, false, &params);
2774 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2777 OTX2_TM_LVL_SCH2, false, &params);
2781 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2784 OTX2_TM_LVL_SCH3, false, &params);
2788 rc = nix_tm_node_add_to_list(dev, def + 4, def + 3, 0,
2791 OTX2_TM_LVL_SCH4, false, &params);
2795 leaf_parent = def + 4;
2796 leaf_level = OTX2_TM_LVL_QUEUE;
2798 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
2799 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2802 OTX2_TM_LVL_ROOT, false, &params);
2806 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2809 OTX2_TM_LVL_SCH1, false, &params);
2813 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2816 OTX2_TM_LVL_SCH2, false, &params);
2820 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2823 OTX2_TM_LVL_SCH3, false, &params);
2827 leaf_parent = def + 3;
2828 leaf_level = OTX2_TM_LVL_SCH4;
2831 /* Add leaf nodes */
2832 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2833 rc = nix_tm_node_add_to_list(dev, i, leaf_parent, 0,
2836 leaf_level, false, &params);
2845 void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev)
2847 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2849 TAILQ_INIT(&dev->node_list);
2850 TAILQ_INIT(&dev->shaper_profile_list);
2851 dev->tm_rate_min = 1E9; /* 1Gbps */
2854 int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
2856 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2857 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2858 uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
2861 /* Free up all resources already held */
2862 rc = nix_tm_free_resources(dev, 0, 0, false);
2864 otx2_err("Failed to freeup existing resources,rc=%d", rc);
2868 /* Clear shaper profiles */
2869 nix_tm_clear_shaper_profiles(dev);
2870 dev->tm_flags = NIX_TM_DEFAULT_TREE;
2872 /* Disable TL1 static priority when VFs are enabled,
2873 * as otherwise the VFs' TL2 nodes would need to be reallocated
2874 * at runtime to support a specific PF topology.
2876 if (pci_dev->max_vfs)
2877 dev->tm_flags |= NIX_TM_TL1_NO_SP;
2879 rc = nix_tm_prepare_default_tree(eth_dev);
2883 rc = nix_tm_alloc_resources(eth_dev, false);
2886 dev->tm_leaf_cnt = sq_cnt;
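/*
 * The rate-limited tree below differs from the default tree in one
 * respect: instead of one scheduler chain shared by all SQs, every SQ
 * gets its own SMQ/MDQ parent, so a per-queue shaper can later be
 * programmed at the MDQ level by otx2_nix_tm_rate_limit_mdq().
 */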
2892 nix_tm_prepare_rate_limited_tree(struct rte_eth_dev *eth_dev)
2894 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2895 uint32_t def = eth_dev->data->nb_tx_queues;
2896 struct rte_tm_node_params params;
2897 uint32_t leaf_parent, i, rc = 0;
2899 memset(&params, 0, sizeof(params));
2901 if (nix_tm_have_tl1_access(dev)) {
2902 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
2903 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2906 OTX2_TM_LVL_ROOT, false, &params);
2909 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2912 OTX2_TM_LVL_SCH1, false, &params);
2915 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2918 OTX2_TM_LVL_SCH2, false, &params);
2921 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2924 OTX2_TM_LVL_SCH3, false, &params);
2927 leaf_parent = def + 3;
2929 /* Add per queue SMQ nodes */
2930 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2931 rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
2933 0, DEFAULT_RR_WEIGHT,
2941 /* Add leaf nodes */
2942 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2943 rc = nix_tm_node_add_to_list(dev, i,
2944 leaf_parent + 1 + i, 0,
2956 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
2957 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2958 DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL2,
2959 OTX2_TM_LVL_ROOT, false, &params);
2962 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2963 DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL3,
2964 OTX2_TM_LVL_SCH1, false, &params);
2967 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2968 DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL4,
2969 OTX2_TM_LVL_SCH2, false, &params);
2972 leaf_parent = def + 2;
2974 /* Add per queue SMQ nodes */
2975 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2976 rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
2978 0, DEFAULT_RR_WEIGHT,
2986 /* Add leaf nodes */
2987 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2988 rc = nix_tm_node_add_to_list(dev, i, leaf_parent + 1 + i, 0,
3001 otx2_nix_tm_rate_limit_mdq(struct rte_eth_dev *eth_dev,
3002 struct otx2_nix_tm_node *tm_node,
3005 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3006 struct otx2_nix_tm_shaper_profile profile;
3007 struct otx2_mbox *mbox = dev->mbox;
3008 volatile uint64_t *reg, *regval;
3009 struct nix_txschq_config *req;
3014 flags = tm_node->flags;
3016 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
3017 req->lvl = NIX_TXSCH_LVL_MDQ;
3019 regval = req->regval;
3022 k += prepare_tm_sw_xoff(tm_node, true, &reg[k], &regval[k]);
3023 flags &= ~NIX_TM_NODE_ENABLED;
3027 if (!(flags & NIX_TM_NODE_ENABLED)) {
3028 k += prepare_tm_sw_xoff(tm_node, false, &reg[k], &regval[k]);
3029 flags |= NIX_TM_NODE_ENABLED;
3032 /* Use only PIR for rate limit */
3033 memset(&profile, 0, sizeof(profile));
3034 profile.params.peak.rate = tx_rate;
3035 /* Minimum burst: ~4us worth of bytes at tx_rate, floored at one max-sized frame */
3036 profile.params.peak.size = RTE_MAX(NIX_MAX_HW_FRS,
3037 (4ull * tx_rate) / (1E6 * 8));
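/*
 * e.g. tx_rate = 1 Gbps gives (4 * 1e9) / (1e6 * 8) = 500 bytes, and
 * the RTE_MAX() above then keeps the burst at least one max-sized frame.
 */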
3038 if (!dev->tm_rate_min || dev->tm_rate_min > tx_rate)
3039 dev->tm_rate_min = tx_rate;
3041 k += prepare_tm_shaper_reg(tm_node, &profile, &reg[k], &regval[k]);
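/*
 * The SW_XOFF updates and the PIR shaper values accumulated above go
 * out in a single nix_txschq_cfg mailbox message via the one
 * otx2_mbox_process() call below.
 */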
3044 rc = otx2_mbox_process(mbox);
3048 tm_node->flags = flags;
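/*
 * Per-queue Tx rate limiting (rate given in Mbps); this is intended as
 * the backend for the generic ethdev per-queue rate-limit operation.
 * If the default tree is still active and there is more than one Tx
 * queue, it is swapped for the rate-limited tree first so each queue
 * has its own MDQ to shape.
 */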
3053 otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
3054 uint16_t queue_idx, uint16_t tx_rate_mbps)
3056 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3057 uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
3058 struct otx2_nix_tm_node *tm_node;
3061 /* Check for supported revisions */
3062 if (otx2_dev_is_95xx_Ax(dev) ||
3063 otx2_dev_is_96xx_Ax(dev))
3066 if (queue_idx >= eth_dev->data->nb_tx_queues)
3069 if (!(dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
3070 !(dev->tm_flags & NIX_TM_RATE_LIMIT_TREE))
3073 if ((dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
3074 eth_dev->data->nb_tx_queues > 1) {
3075 /* For TM topology change ethdev needs to be stopped */
3076 if (eth_dev->data->dev_started)
3080 * Disable transmit; it will be re-enabled once the
3081 * new topology is in place.
3083 rc = nix_xmit_disable(eth_dev);
3085 otx2_err("failed to disable TX, rc=%d", rc);
3089 rc = nix_tm_free_resources(dev, 0, 0, false);
3091 otx2_tm_dbg("failed to free default resources, rc %d",
3096 rc = nix_tm_prepare_rate_limited_tree(eth_dev);
3098 otx2_tm_dbg("failed to prepare tm tree, rc=%d", rc);
3102 rc = nix_tm_alloc_resources(eth_dev, true);
3104 otx2_tm_dbg("failed to allocate tm tree, rc=%d", rc);
3108 dev->tm_flags &= ~NIX_TM_DEFAULT_TREE;
3109 dev->tm_flags |= NIX_TM_RATE_LIMIT_TREE;
3112 tm_node = nix_tm_node_search(dev, queue_idx, false);
3114 /* check if we found a valid leaf node */
3116 !nix_tm_is_leaf(dev, tm_node->lvl) ||
3118 tm_node->parent->hw_id == UINT32_MAX)
3121 return otx2_nix_tm_rate_limit_mdq(eth_dev, tm_node->parent, tx_rate);
3123 otx2_tm_dbg("Unsupported TM tree 0x%0x", dev->tm_flags);
3128 otx2_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *arg)
3130 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3135 /* Check for supported revisions */
3136 if (otx2_dev_is_95xx_Ax(dev) ||
3137 otx2_dev_is_96xx_Ax(dev))
3140 *(const void **)arg = &otx2_tm_ops;
3146 otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
3148 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3151 /* Xmit is assumed to be disabled */
3152 /* Free up resources already held */
3153 rc = nix_tm_free_resources(dev, 0, 0, false);
3155 otx2_err("Failed to freeup existing resources,rc=%d", rc);
3159 /* Clear shaper profiles */
3160 nix_tm_clear_shaper_profiles(dev);
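/*
 * Helper for the SQ configuration path (assumption: called when a Tx
 * queue's SQ context is set up): returns the SMQ the SQ must be
 * attached to and the RR quantum derived from the node's weight, and
 * lifts the SMQ's SW_XOFF so the queue can start transmitting.
 */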
3167 otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
3168 uint32_t *rr_quantum, uint16_t *smq)
3170 struct otx2_nix_tm_node *tm_node;
3173 /* 0..sq_cnt-1 are leaf nodes */
3174 if (sq >= dev->tm_leaf_cnt)
3177 /* Search for internal node first */
3178 tm_node = nix_tm_node_search(dev, sq, false);
3180 tm_node = nix_tm_node_search(dev, sq, true);
3182 /* Check if we found a valid leaf node */
3183 if (!tm_node || !nix_tm_is_leaf(dev, tm_node->lvl) ||
3184 !tm_node->parent || tm_node->parent->hw_id == UINT32_MAX) {
3188 /* Get SMQ Id of leaf node's parent */
3189 *smq = tm_node->parent->hw_id;
3190 *rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
3192 rc = nix_smq_xoff(dev, tm_node->parent, false);
3195 tm_node->flags |= NIX_TM_NODE_ENABLED;