1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #include <rte_malloc.h>
7 #include "otx2_ethdev.h"
10 /* Use last LVL_CNT nodes as default nodes */
11 #define NIX_DEFAULT_NODE_ID_START (RTE_TM_NODE_ID_NULL - NIX_TXSCH_LVL_CNT)
13 enum otx2_tm_node_level {
24 uint64_t shaper2regval(struct shaper_params *shaper)
26 return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
27 (shaper->div_exp << 13) | (shaper->exponent << 9) |
28 (shaper->mantissa << 1);
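/*
 * Note on the layout (derived from the shifts above, not from the HRM):
 * the returned value fills a NIX CIR/PIR shaper register with the rate
 * mantissa at bit 1, rate exponent at bit 9, rate divider exponent at
 * bit 13, burst mantissa at bit 29 and burst exponent at bit 37; bit 0
 * is the enable bit OR'ed in by the callers as "| 1". The field widths
 * are assumed to match the MAX_* limits used in the conversions below.
 */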
32 otx2_nix_get_link(struct otx2_eth_dev *dev)
34 int link = 13 /* SDP */;
38 lmac_chan = dev->tx_chan_base;
41 if (lmac_chan >= 0x800) {
42 map = lmac_chan & 0x7FF;
43 link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
44 } else if (lmac_chan < 0x700) {
53 nix_get_relchan(struct otx2_eth_dev *dev)
55 return dev->tx_chan_base & 0xff;
59 nix_tm_have_tl1_access(struct otx2_eth_dev *dev)
61 bool is_lbk = otx2_dev_is_lbk(dev);
62 return otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) && !is_lbk;
66 nix_tm_is_leaf(struct otx2_eth_dev *dev, int lvl)
68 if (nix_tm_have_tl1_access(dev))
69 return (lvl == OTX2_TM_LVL_QUEUE);
71 return (lvl == OTX2_TM_LVL_SCH4);
75 find_prio_anchor(struct otx2_eth_dev *dev, uint32_t node_id)
77 struct otx2_nix_tm_node *child_node;
79 TAILQ_FOREACH(child_node, &dev->node_list, node) {
80 if (!child_node->parent)
82 if (!(child_node->parent->id == node_id))
84 if (child_node->priority == child_node->parent->rr_prio)
86 return child_node->hw_id - child_node->priority;
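/*
 * Worked example (illustrative): static priority children of a parent are
 * given contiguous hw ids ordered by priority, so a child with hw_id 10
 * and priority 2 implies the priority-0 sibling sits at hw_id 8. That base
 * id is what populate_tm_reg() writes into the parent's TOPOLOGY register
 * as the prio anchor (child << 32).
 */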
92 static struct otx2_nix_tm_shaper_profile *
93 nix_tm_shaper_profile_search(struct otx2_eth_dev *dev, uint32_t shaper_id)
95 struct otx2_nix_tm_shaper_profile *tm_shaper_profile;
97 TAILQ_FOREACH(tm_shaper_profile, &dev->shaper_profile_list, shaper) {
98 if (tm_shaper_profile->shaper_profile_id == shaper_id)
99 return tm_shaper_profile;
104 static inline uint64_t
105 shaper_rate_to_nix(uint64_t value, uint64_t *exponent_p,
106 uint64_t *mantissa_p, uint64_t *div_exp_p)
108 uint64_t div_exp, exponent, mantissa;
110 /* Boundary checks */
111 if (value < MIN_SHAPER_RATE ||
112 value > MAX_SHAPER_RATE)
115 if (value <= SHAPER_RATE(0, 0, 0)) {
116 /* Calculate rate div_exp and mantissa using
117 * the following formula:
119 * value = (2E6 * (256 + mantissa)
120 * / ((1 << div_exp) * 256))
124 mantissa = MAX_RATE_MANTISSA;
126 while (value < (NIX_SHAPER_RATE_CONST / (1 << div_exp)))
130 ((NIX_SHAPER_RATE_CONST * (256 + mantissa)) /
131 ((1 << div_exp) * 256)))
134 /* Calculate rate exponent and mantissa using
135 * the following formula:
137 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
141 exponent = MAX_RATE_EXPONENT;
142 mantissa = MAX_RATE_MANTISSA;
144 while (value < (NIX_SHAPER_RATE_CONST * (1 << exponent)))
147 while (value < ((NIX_SHAPER_RATE_CONST *
148 ((256 + mantissa) << exponent)) / 256))
152 if (div_exp > MAX_RATE_DIV_EXP ||
153 exponent > MAX_RATE_EXPONENT || mantissa > MAX_RATE_MANTISSA)
157 *div_exp_p = div_exp;
159 *exponent_p = exponent;
161 *mantissa_p = mantissa;
163 /* Calculate real rate value */
164 return SHAPER_RATE(exponent, mantissa, div_exp);
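/*
 * Worked example (illustrative, assuming NIX_SHAPER_RATE_CONST is the 2E6
 * constant quoted in the comments above): for value = 1E8 bps (100 Mbps)
 * the loops settle on exponent = 5 (2E6 << 6 would overshoot) and
 * mantissa = 144, with div_exp left at 0, since
 *   2E6 * ((256 + 144) << 5) / 256 = 1E8
 * so the returned rate matches the request with no rounding error.
 */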
167 static inline uint64_t
168 shaper_burst_to_nix(uint64_t value, uint64_t *exponent_p,
169 uint64_t *mantissa_p)
171 uint64_t exponent, mantissa;
173 if (value < MIN_SHAPER_BURST || value > MAX_SHAPER_BURST)
176 /* Calculate burst exponent and mantissa using
177 * the following formula:
179  * value = ((256 + mantissa) << (exponent + 1)) / 256
183 exponent = MAX_BURST_EXPONENT;
184 mantissa = MAX_BURST_MANTISSA;
186 while (value < (1ull << (exponent + 1)))
189 while (value < ((256 + mantissa) << (exponent + 1)) / 256)
192 if (exponent > MAX_BURST_EXPONENT || mantissa > MAX_BURST_MANTISSA)
196 *exponent_p = exponent;
198 *mantissa_p = mantissa;
200 return SHAPER_BURST(exponent, mantissa);
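/*
 * Worked example (illustrative): for a 64 KB burst (value = 65536) the
 * loops pick exponent = 15 and mantissa = 0, since
 *   ((256 + 0) << (15 + 1)) / 256 = 65536
 * i.e. the burst is representable exactly; other values round down to the
 * nearest representable burst.
 */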
204 shaper_config_to_nix(struct otx2_nix_tm_shaper_profile *profile,
205 struct shaper_params *cir,
206 struct shaper_params *pir)
208 struct rte_tm_shaper_params *param = &profile->params;
213 /* Calculate CIR exponent and mantissa */
214 if (param->committed.rate)
215 cir->rate = shaper_rate_to_nix(param->committed.rate,
220 /* Calculate PIR exponent and mantissa */
221 if (param->peak.rate)
222 pir->rate = shaper_rate_to_nix(param->peak.rate,
227 /* Calculate CIR burst exponent and mantissa */
228 if (param->committed.size)
229 cir->burst = shaper_burst_to_nix(param->committed.size,
230 &cir->burst_exponent,
231 &cir->burst_mantissa);
233 /* Calculate PIR burst exponent and mantissa */
234 if (param->peak.size)
235 pir->burst = shaper_burst_to_nix(param->peak.size,
236 &pir->burst_exponent,
237 &pir->burst_mantissa);
241 shaper_default_red_algo(struct otx2_eth_dev *dev,
242 struct otx2_nix_tm_node *tm_node,
243 struct otx2_nix_tm_shaper_profile *profile)
245 struct shaper_params cir, pir;
247 /* C0 doesn't support STALL when both PIR & CIR are enabled */
248 if (profile && otx2_dev_is_96xx_Cx(dev)) {
249 memset(&cir, 0, sizeof(cir));
250 memset(&pir, 0, sizeof(pir));
251 shaper_config_to_nix(profile, &cir, &pir);
253 if (pir.rate && cir.rate) {
254 tm_node->red_algo = NIX_REDALG_DISCARD;
255 tm_node->flags |= NIX_TM_NODE_RED_DISCARD;
260 tm_node->red_algo = NIX_REDALG_STD;
261 tm_node->flags &= ~NIX_TM_NODE_RED_DISCARD;
265 populate_tm_tl1_default(struct otx2_eth_dev *dev, uint32_t schq)
267 struct otx2_mbox *mbox = dev->mbox;
268 struct nix_txschq_config *req;
271 * Default config for TL1.
272 * For VF this is always ignored.
275 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
276 req->lvl = NIX_TXSCH_LVL_TL1;
278 /* Set DWRR quantum */
279 req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
280 req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
283 req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
284 req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
287 req->reg[2] = NIX_AF_TL1X_CIR(schq);
291 return otx2_mbox_process(mbox);
295 prepare_tm_sched_reg(struct otx2_eth_dev *dev,
296 struct otx2_nix_tm_node *tm_node,
297 volatile uint64_t *reg, volatile uint64_t *regval)
299 uint64_t strict_prio = tm_node->priority;
300 uint32_t hw_lvl = tm_node->hw_lvl;
301 uint32_t schq = tm_node->hw_id;
305 rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
307 /* For children of the root, strict priority is the default if either
308  * the device root is TL2 or TL1 static priority is disabled.
310 if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
311 (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
312 dev->tm_flags & NIX_TM_TL1_NO_SP))
313 strict_prio = TXSCH_TL1_DFLT_RR_PRIO;
315 otx2_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
316 "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
317 nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
318 tm_node->id, strict_prio, rr_quantum, tm_node);
321 case NIX_TXSCH_LVL_SMQ:
322 reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
323 regval[k] = (strict_prio << 24) | rr_quantum;
327 case NIX_TXSCH_LVL_TL4:
328 reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
329 regval[k] = (strict_prio << 24) | rr_quantum;
333 case NIX_TXSCH_LVL_TL3:
334 reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
335 regval[k] = (strict_prio << 24) | rr_quantum;
339 case NIX_TXSCH_LVL_TL2:
340 reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
341 regval[k] = (strict_prio << 24) | rr_quantum;
345 case NIX_TXSCH_LVL_TL1:
346 reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
347 regval[k] = rr_quantum;
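/*
 * Note (derived from the shifts above): every *_SCHEDULE register takes
 * the DWRR quantum in the bits below 24 and the strict priority starting
 * at bit 24; TL1, being the root, is programmed with the RR quantum only.
 */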
357 prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
358 struct otx2_nix_tm_shaper_profile *profile,
359 volatile uint64_t *reg, volatile uint64_t *regval)
361 struct shaper_params cir, pir;
362 uint32_t schq = tm_node->hw_id;
365 memset(&cir, 0, sizeof(cir));
366 memset(&pir, 0, sizeof(pir));
367 shaper_config_to_nix(profile, &cir, &pir);
369 otx2_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
370 "pir %" PRIu64 "(%" PRIu64 "B),"
371 " cir %" PRIu64 "(%" PRIu64 "B) (%p)",
372 nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
373 tm_node->id, pir.rate, pir.burst,
374 cir.rate, cir.burst, tm_node);
376 switch (tm_node->hw_lvl) {
377 case NIX_TXSCH_LVL_SMQ:
378 /* Configure PIR, CIR */
379 reg[k] = NIX_AF_MDQX_PIR(schq);
380 regval[k] = (pir.rate && pir.burst) ?
381 (shaper2regval(&pir) | 1) : 0;
384 reg[k] = NIX_AF_MDQX_CIR(schq);
385 regval[k] = (cir.rate && cir.burst) ?
386 (shaper2regval(&cir) | 1) : 0;
389 /* Configure RED ALG */
390 reg[k] = NIX_AF_MDQX_SHAPE(schq);
391 regval[k] = ((uint64_t)tm_node->red_algo << 9);
394 case NIX_TXSCH_LVL_TL4:
395 /* Configure PIR, CIR */
396 reg[k] = NIX_AF_TL4X_PIR(schq);
397 regval[k] = (pir.rate && pir.burst) ?
398 (shaper2regval(&pir) | 1) : 0;
401 reg[k] = NIX_AF_TL4X_CIR(schq);
402 regval[k] = (cir.rate && cir.burst) ?
403 (shaper2regval(&cir) | 1) : 0;
406 /* Configure RED algo */
407 reg[k] = NIX_AF_TL4X_SHAPE(schq);
408 regval[k] = ((uint64_t)tm_node->red_algo << 9);
411 case NIX_TXSCH_LVL_TL3:
412 /* Configure PIR, CIR */
413 reg[k] = NIX_AF_TL3X_PIR(schq);
414 regval[k] = (pir.rate && pir.burst) ?
415 (shaper2regval(&pir) | 1) : 0;
418 reg[k] = NIX_AF_TL3X_CIR(schq);
419 regval[k] = (cir.rate && cir.burst) ?
420 (shaper2regval(&cir) | 1) : 0;
423 /* Configure RED algo */
424 reg[k] = NIX_AF_TL3X_SHAPE(schq);
425 regval[k] = ((uint64_t)tm_node->red_algo << 9);
429 case NIX_TXSCH_LVL_TL2:
430 /* Configure PIR, CIR */
431 reg[k] = NIX_AF_TL2X_PIR(schq);
432 regval[k] = (pir.rate && pir.burst) ?
433 (shaper2regval(&pir) | 1) : 0;
436 reg[k] = NIX_AF_TL2X_CIR(schq);
437 regval[k] = (cir.rate && cir.burst) ?
438 (shaper2regval(&cir) | 1) : 0;
441 /* Configure RED algo */
442 reg[k] = NIX_AF_TL2X_SHAPE(schq);
443 regval[k] = ((uint64_t)tm_node->red_algo << 9);
447 case NIX_TXSCH_LVL_TL1:
449 reg[k] = NIX_AF_TL1X_CIR(schq);
450 regval[k] = (cir.rate && cir.burst) ?
451 (shaper2regval(&cir) | 1) : 0;
460 prepare_tm_sw_xoff(struct otx2_nix_tm_node *tm_node, bool enable,
461 volatile uint64_t *reg, volatile uint64_t *regval)
463 uint32_t hw_lvl = tm_node->hw_lvl;
464 uint32_t schq = tm_node->hw_id;
467 otx2_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)",
468 nix_hwlvl2str(hw_lvl), schq, tm_node->lvl,
469 tm_node->id, enable, tm_node);
474 case NIX_TXSCH_LVL_MDQ:
475 reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
478 case NIX_TXSCH_LVL_TL4:
479 reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
482 case NIX_TXSCH_LVL_TL3:
483 reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
486 case NIX_TXSCH_LVL_TL2:
487 reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
490 case NIX_TXSCH_LVL_TL1:
491 reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
502 populate_tm_reg(struct otx2_eth_dev *dev,
503 struct otx2_nix_tm_node *tm_node)
505 struct otx2_nix_tm_shaper_profile *profile;
506 uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
507 uint64_t regval[MAX_REGS_PER_MBOX_MSG];
508 uint64_t reg[MAX_REGS_PER_MBOX_MSG];
509 struct otx2_mbox *mbox = dev->mbox;
510 uint64_t parent = 0, child = 0;
511 uint32_t hw_lvl, rr_prio, schq;
512 struct nix_txschq_config *req;
516 memset(regval_mask, 0, sizeof(regval_mask));
517 profile = nix_tm_shaper_profile_search(dev,
518 tm_node->params.shaper_profile_id);
519 rr_prio = tm_node->rr_prio;
520 hw_lvl = tm_node->hw_lvl;
521 schq = tm_node->hw_id;
523 /* Root node will not have a parent node */
524 if (hw_lvl == dev->otx2_tm_root_lvl)
525 parent = tm_node->parent_hw_id;
527 parent = tm_node->parent->hw_id;
529 /* When the root is TL2, configure the parent TL1 with defaults */
530 if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
531 hw_lvl == dev->otx2_tm_root_lvl) {
532 rc = populate_tm_tl1_default(dev, parent);
537 if (hw_lvl != NIX_TXSCH_LVL_SMQ)
538 child = find_prio_anchor(dev, tm_node->id);
540 /* Override default rr_prio when TL1
541 * Static Priority is disabled
543 if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
544 dev->tm_flags & NIX_TM_TL1_NO_SP) {
545 rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
549 otx2_tm_dbg("Topology config node %s(%u)->%s(%"PRIu64") lvl %u, id %u"
550 " prio_anchor %"PRIu64" rr_prio %u (%p)",
551 nix_hwlvl2str(hw_lvl), schq, nix_hwlvl2str(hw_lvl + 1),
552 parent, tm_node->lvl, tm_node->id, child, rr_prio, tm_node);
554 /* Prepare Topology and Link config */
556 case NIX_TXSCH_LVL_SMQ:
558 /* Set xoff which will be cleared later and minimum length
559  * which will be used for zero padding if the packet length is smaller.
562 reg[k] = NIX_AF_SMQX_CFG(schq);
563 regval[k] = BIT_ULL(50) | NIX_MIN_HW_FRS;
564 regval_mask[k] = ~(BIT_ULL(50) | 0x7f);
567 /* Parent and schedule conf */
568 reg[k] = NIX_AF_MDQX_PARENT(schq);
569 regval[k] = parent << 16;
573 case NIX_TXSCH_LVL_TL4:
574 /* Parent and schedule conf */
575 reg[k] = NIX_AF_TL4X_PARENT(schq);
576 regval[k] = parent << 16;
579 reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
580 regval[k] = (child << 32) | (rr_prio << 1);
583 /* Configure TL4 to send to SDP channel instead of CGX/LBK */
584 if (otx2_dev_is_sdp(dev)) {
585 reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
586 regval[k] = BIT_ULL(12);
590 case NIX_TXSCH_LVL_TL3:
591 /* Parent and schedule conf */
592 reg[k] = NIX_AF_TL3X_PARENT(schq);
593 regval[k] = parent << 16;
596 reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
597 regval[k] = (child << 32) | (rr_prio << 1);
600 /* Link configuration */
601 if (!otx2_dev_is_sdp(dev) &&
602 dev->link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
603 reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
604 otx2_nix_get_link(dev));
605 regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
610 case NIX_TXSCH_LVL_TL2:
611 /* Parent and schedule conf */
612 reg[k] = NIX_AF_TL2X_PARENT(schq);
613 regval[k] = parent << 16;
616 reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
617 regval[k] = (child << 32) | (rr_prio << 1);
620 /* Link configuration */
621 if (!otx2_dev_is_sdp(dev) &&
622 dev->link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
623 reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
624 otx2_nix_get_link(dev));
625 regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
630 case NIX_TXSCH_LVL_TL1:
631 reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
632 regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
638 /* Prepare schedule config */
639 k += prepare_tm_sched_reg(dev, tm_node, &reg[k], &regval[k]);
641 /* Prepare shaping config */
642 k += prepare_tm_shaper_reg(tm_node, profile, &reg[k], &regval[k]);
647 /* Copy and send config mbox */
648 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
652 otx2_mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
653 otx2_mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
654 otx2_mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);
656 rc = otx2_mbox_process(mbox);
662 otx2_err("Txschq cfg request failed for node %p, rc=%d", tm_node, rc);
668 nix_tm_txsch_reg_config(struct otx2_eth_dev *dev)
670 struct otx2_nix_tm_node *tm_node;
674 for (hw_lvl = 0; hw_lvl <= dev->otx2_tm_root_lvl; hw_lvl++) {
675 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
676 if (tm_node->hw_lvl == hw_lvl &&
677 tm_node->hw_lvl != NIX_TXSCH_LVL_CNT) {
678 rc = populate_tm_reg(dev, tm_node);
688 static struct otx2_nix_tm_node *
689 nix_tm_node_search(struct otx2_eth_dev *dev,
690 uint32_t node_id, bool user)
692 struct otx2_nix_tm_node *tm_node;
694 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
695 if (tm_node->id == node_id &&
696 (user == !!(tm_node->flags & NIX_TM_NODE_USER)))
703 check_rr(struct otx2_eth_dev *dev, uint32_t priority, uint32_t parent_id)
705 struct otx2_nix_tm_node *tm_node;
708 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
709 if (!tm_node->parent)
712 if (!(tm_node->parent->id == parent_id))
715 if (tm_node->priority == priority)
722 nix_tm_update_parent_info(struct otx2_eth_dev *dev)
724 struct otx2_nix_tm_node *tm_node_child;
725 struct otx2_nix_tm_node *tm_node;
726 struct otx2_nix_tm_node *parent;
730 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
731 if (!tm_node->parent)
733 /* Count the group of children with the same priority, i.e. the RR group */
734 parent = tm_node->parent;
735 priority = tm_node->priority;
736 rr_num = check_rr(dev, priority, parent->id);
738 /* Assuming that multiple RR groups are
739 * not configured based on capability.
742 parent->rr_prio = priority;
743 parent->rr_num = rr_num;
746 /* Find out static priority children that are not in RR */
747 TAILQ_FOREACH(tm_node_child, &dev->node_list, node) {
748 if (!tm_node_child->parent)
750 if (parent->id != tm_node_child->parent->id)
752 if (parent->max_prio == UINT32_MAX &&
753 tm_node_child->priority != parent->rr_prio)
754 parent->max_prio = 0;
756 if (parent->max_prio < tm_node_child->priority &&
757 parent->rr_prio != tm_node_child->priority)
758 parent->max_prio = tm_node_child->priority;
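/*
 * Worked example (illustrative): with children at priorities {0, 1, 1, 1, 2},
 * check_rr() reports 3 nodes at priority 1, so the parent ends up with
 * rr_prio = 1 and rr_num = 3, while the second loop leaves max_prio = 2,
 * the highest static priority outside the RR group.
 */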
766 nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,
767 uint32_t parent_node_id, uint32_t priority,
768 uint32_t weight, uint16_t hw_lvl,
769 uint16_t lvl, bool user,
770 struct rte_tm_node_params *params)
772 struct otx2_nix_tm_shaper_profile *profile;
773 struct otx2_nix_tm_node *tm_node, *parent_node;
776 profile_id = params->shaper_profile_id;
777 profile = nix_tm_shaper_profile_search(dev, profile_id);
779 parent_node = nix_tm_node_search(dev, parent_node_id, user);
781 tm_node = rte_zmalloc("otx2_nix_tm_node",
782 sizeof(struct otx2_nix_tm_node), 0);
787 tm_node->hw_lvl = hw_lvl;
789 /* Maintain minimum weight */
793 tm_node->id = node_id;
794 tm_node->priority = priority;
795 tm_node->weight = weight;
796 tm_node->rr_prio = 0xf;
797 tm_node->max_prio = UINT32_MAX;
798 tm_node->hw_id = UINT32_MAX;
801 tm_node->flags = NIX_TM_NODE_USER;
802 rte_memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
805 profile->reference_count++;
807 tm_node->parent = parent_node;
808 tm_node->parent_hw_id = UINT32_MAX;
809 shaper_default_red_algo(dev, tm_node, profile);
811 TAILQ_INSERT_TAIL(&dev->node_list, tm_node, node);
817 nix_tm_clear_shaper_profiles(struct otx2_eth_dev *dev)
819 struct otx2_nix_tm_shaper_profile *shaper_profile;
821 while ((shaper_profile = TAILQ_FIRST(&dev->shaper_profile_list))) {
822 if (shaper_profile->reference_count)
823 otx2_tm_dbg("Shaper profile %u has non zero references",
824 shaper_profile->shaper_profile_id);
825 TAILQ_REMOVE(&dev->shaper_profile_list, shaper_profile, shaper);
826 rte_free(shaper_profile);
833 nix_clear_path_xoff(struct otx2_eth_dev *dev,
834 struct otx2_nix_tm_node *tm_node)
836 struct nix_txschq_config *req;
837 struct otx2_nix_tm_node *p;
840 /* Manipulating SW_XOFF not supported on Ax */
841 if (otx2_dev_is_Ax(dev))
844 /* Enable nodes in path for flush to succeed */
845 if (!nix_tm_is_leaf(dev, tm_node->lvl))
850 if (!(p->flags & NIX_TM_NODE_ENABLED) &&
851 (p->flags & NIX_TM_NODE_HWRES)) {
852 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
853 req->lvl = p->hw_lvl;
854 req->num_regs = prepare_tm_sw_xoff(p, false, req->reg,
856 rc = otx2_mbox_process(dev->mbox);
860 p->flags |= NIX_TM_NODE_ENABLED;
869 nix_smq_xoff(struct otx2_eth_dev *dev,
870 struct otx2_nix_tm_node *tm_node,
873 struct otx2_mbox *mbox = dev->mbox;
874 struct nix_txschq_config *req;
878 smq = tm_node->hw_id;
879 otx2_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
880 enable ? "enable" : "disable");
882 rc = nix_clear_path_xoff(dev, tm_node);
886 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
887 req->lvl = NIX_TXSCH_LVL_SMQ;
890 req->reg[0] = NIX_AF_SMQX_CFG(smq);
891 req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
892 req->regval_mask[0] = enable ?
893 ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);
895 return otx2_mbox_process(mbox);
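/*
 * Note (derived from the register write above): enabling XOFF sets both
 * bit 50 and bit 49 of NIX_AF_SMQX_CFG (presumably the enqueue-xoff and
 * flush controls), while disabling clears only bit 50 and leaves bit 49
 * untouched.
 */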
899 otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable)
901 struct otx2_eth_txq *txq = __txq;
902 struct npa_aq_enq_req *req;
903 struct npa_aq_enq_rsp *rsp;
904 struct otx2_npa_lf *lf;
905 struct otx2_mbox *mbox;
906 uint64_t aura_handle;
909 otx2_tm_dbg("Setting SQ %u SQB aura FC to %s", txq->sq,
910 enable ? "enable" : "disable");
912 lf = otx2_npa_lf_obj_get();
916 /* Set/clear sqb aura fc_ena */
917 aura_handle = txq->sqb_pool->pool_id;
918 req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
920 req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
921 req->ctype = NPA_AQ_CTYPE_AURA;
922 req->op = NPA_AQ_INSTOP_WRITE;
923 /* Below is not needed for aura writes but AF driver needs it */
924 /* AF will translate to associated poolctx */
925 req->aura.pool_addr = req->aura_id;
927 req->aura.fc_ena = enable;
928 req->aura_mask.fc_ena = 1;
930 rc = otx2_mbox_process(mbox);
934 /* Read back npa aura ctx */
935 req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
937 req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
938 req->ctype = NPA_AQ_CTYPE_AURA;
939 req->op = NPA_AQ_INSTOP_READ;
941 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
945 /* Init when enabled as there might be no triggers */
947 *(volatile uint64_t *)txq->fc_mem = rsp->aura.count;
949 *(volatile uint64_t *)txq->fc_mem = txq->nb_sqb_bufs;
950 /* Sync write barrier */
957 nix_txq_flush_sq_spin(struct otx2_eth_txq *txq)
959 uint16_t sqb_cnt, head_off, tail_off;
960 struct otx2_eth_dev *dev = txq->dev;
961 uint64_t wdata, val, prev;
962 uint16_t sq = txq->sq;
964 uint64_t timeout; /* 10's of usec */
966 /* Wait for enough time based on shaper min rate */
967 timeout = (txq->qconf.nb_desc * NIX_MAX_HW_FRS * 8 * 1E5);
968 timeout = timeout / dev->tm_rate_min;
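/*
 * Worked example (illustrative, assuming NIX_MAX_HW_FRS is the ~9212 byte
 * max HW frame size and tm_rate_min is 1 Gbps): a 1024-entry SQ full of
 * max-size frames holds roughly 1024 * 9212 * 8 ~= 7.5E7 bits, which
 * drains in about 75 ms, i.e. timeout ~= 7500 in units of 10 us.
 */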
972 wdata = ((uint64_t)sq << 32);
973 regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS);
974 val = otx2_atomic64_add_nosync(wdata, regaddr);
976 /* Spin multiple iterations as "txq->fc_cache_pkts" can still
977 * have space to send pkts even though fc_mem is disabled
983 val = otx2_atomic64_add_nosync(wdata, regaddr);
984 /* Continue on error */
985 if (val & BIT_ULL(63))
991 sqb_cnt = val & 0xFFFF;
992 head_off = (val >> 20) & 0x3F;
993 tail_off = (val >> 28) & 0x3F;
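/*
 * Note (derived from the masks above): the atomic read of
 * NIX_LF_SQ_OP_STATUS packs the in-flight SQB count in bits 15..0 and the
 * 6-bit head/tail offsets at bits 25..20 and 33..28 respectively.
 */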
995 /* SQ reached quiescent state */
996 if (sqb_cnt <= 1 && head_off == tail_off &&
997 (*txq->fc_mem == txq->nb_sqb_bufs)) {
1009 otx2_nix_tm_dump(dev);
1013 /* Flush and disable tx queue and its parent SMQ */
1014 int otx2_nix_sq_flush_pre(void *_txq, bool dev_started)
1016 struct otx2_nix_tm_node *tm_node, *sibling;
1017 struct otx2_eth_txq *txq;
1018 struct otx2_eth_dev *dev;
1027 user = !!(dev->tm_flags & NIX_TM_COMMITTED);
1029 /* Find the node for this SQ */
1030 tm_node = nix_tm_node_search(dev, sq, user);
1031 if (!tm_node || !(tm_node->flags & NIX_TM_NODE_ENABLED)) {
1032 otx2_err("Invalid node/state for sq %u", sq);
1036 /* Enable CGX RXTX to drain pkts */
1038 /* Though this enables both RX MCAM entries and the CGX link,
1039  * we assume all the Rx queues were already stopped earlier.
1041 otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
1042 rc = otx2_mbox_process(dev->mbox);
1044 otx2_err("cgx start failed, rc=%d", rc);
1049 /* Disable smq xoff for case it was enabled earlier */
1050 rc = nix_smq_xoff(dev, tm_node->parent, false);
1052 otx2_err("Failed to enable smq %u, rc=%d",
1053 tm_node->parent->hw_id, rc);
1057 /* As per HRM, to disable an SQ, all other SQs
1058  * that feed into the same SMQ must be paused before the SMQ flush.
1060 TAILQ_FOREACH(sibling, &dev->node_list, node) {
1061 if (sibling->parent != tm_node->parent)
1063 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
1067 txq = dev->eth_dev->data->tx_queues[sq];
1071 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
1073 otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
1077 /* Wait for sq entries to be flushed */
1078 rc = nix_txq_flush_sq_spin(txq);
1080 otx2_err("Failed to drain sq %u, rc=%d\n", txq->sq, rc);
1085 tm_node->flags &= ~NIX_TM_NODE_ENABLED;
1087 /* Disable and flush */
1088 rc = nix_smq_xoff(dev, tm_node->parent, true);
1090 otx2_err("Failed to disable smq %u, rc=%d",
1091 tm_node->parent->hw_id, rc);
1095 /* Restore cgx state */
1097 otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
1098 rc |= otx2_mbox_process(dev->mbox);
1104 int otx2_nix_sq_flush_post(void *_txq)
1106 struct otx2_nix_tm_node *tm_node, *sibling;
1107 struct otx2_eth_txq *txq = _txq;
1108 struct otx2_eth_txq *s_txq;
1109 struct otx2_eth_dev *dev;
1117 user = !!(dev->tm_flags & NIX_TM_COMMITTED);
1119 /* Find the node for this SQ */
1120 tm_node = nix_tm_node_search(dev, sq, user);
1122 otx2_err("Invalid node for sq %u", sq);
1126 /* Enable all the siblings back */
1127 TAILQ_FOREACH(sibling, &dev->node_list, node) {
1128 if (sibling->parent != tm_node->parent)
1131 if (sibling->id == sq)
1134 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
1138 s_txq = dev->eth_dev->data->tx_queues[s_sq];
1143 /* Enable back if any SQ is still present */
1144 rc = nix_smq_xoff(dev, tm_node->parent, false);
1146 otx2_err("Failed to enable smq %u, rc=%d",
1147 tm_node->parent->hw_id, rc);
1153 rc = otx2_nix_sq_sqb_aura_fc(s_txq, true);
1155 otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
1164 nix_sq_sched_data(struct otx2_eth_dev *dev,
1165 struct otx2_nix_tm_node *tm_node,
1166 bool rr_quantum_only)
1168 struct rte_eth_dev *eth_dev = dev->eth_dev;
1169 struct otx2_mbox *mbox = dev->mbox;
1170 uint16_t sq = tm_node->id, smq;
1171 struct nix_aq_enq_req *req;
1172 uint64_t rr_quantum;
1175 smq = tm_node->parent->hw_id;
1176 rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
1178 if (rr_quantum_only)
1179 otx2_tm_dbg("Update sq(%u) rr_quantum 0x%"PRIx64, sq, rr_quantum);
1181 otx2_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%"PRIx64,
1182 sq, smq, rr_quantum);
1184 if (sq > eth_dev->data->nb_tx_queues)
1187 req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
1189 req->ctype = NIX_AQ_CTYPE_SQ;
1190 req->op = NIX_AQ_INSTOP_WRITE;
1192 /* smq update only when needed */
1193 if (!rr_quantum_only) {
1195 req->sq_mask.smq = ~req->sq_mask.smq;
1197 req->sq.smq_rr_quantum = rr_quantum;
1198 req->sq_mask.smq_rr_quantum = ~req->sq_mask.smq_rr_quantum;
1200 rc = otx2_mbox_process(mbox);
1202 otx2_err("Failed to set smq, rc=%d", rc);
1206 int otx2_nix_sq_enable(void *_txq)
1208 struct otx2_eth_txq *txq = _txq;
1211 /* Enable sqb_aura fc */
1212 rc = otx2_nix_sq_sqb_aura_fc(txq, true);
1214 otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
1222 nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask,
1223 uint32_t flags, bool hw_only)
1225 struct otx2_nix_tm_shaper_profile *profile;
1226 struct otx2_nix_tm_node *tm_node, *next_node;
1227 struct otx2_mbox *mbox = dev->mbox;
1228 struct nix_txsch_free_req *req;
1229 uint32_t profile_id;
1232 next_node = TAILQ_FIRST(&dev->node_list);
1234 tm_node = next_node;
1235 next_node = TAILQ_NEXT(tm_node, node);
1237 /* Check for only requested nodes */
1238 if ((tm_node->flags & flags_mask) != flags)
1241 if (!nix_tm_is_leaf(dev, tm_node->lvl) &&
1242 tm_node->hw_lvl != NIX_TXSCH_LVL_TL1 &&
1243 tm_node->flags & NIX_TM_NODE_HWRES) {
1244 /* Free specific HW resource */
1245 otx2_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
1246 nix_hwlvl2str(tm_node->hw_lvl),
1247 tm_node->hw_id, tm_node->lvl,
1248 tm_node->id, tm_node);
1250 rc = nix_clear_path_xoff(dev, tm_node);
1254 req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
1256 req->schq_lvl = tm_node->hw_lvl;
1257 req->schq = tm_node->hw_id;
1258 rc = otx2_mbox_process(mbox);
1261 tm_node->flags &= ~NIX_TM_NODE_HWRES;
1264 /* Leave software elements if needed */
1268 otx2_tm_dbg("Free node lvl %u id %u (%p)",
1269 tm_node->lvl, tm_node->id, tm_node);
1271 profile_id = tm_node->params.shaper_profile_id;
1272 profile = nix_tm_shaper_profile_search(dev, profile_id);
1274 profile->reference_count--;
1276 TAILQ_REMOVE(&dev->node_list, tm_node, node);
1281 /* Free all hw resources */
1282 req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
1283 req->flags = TXSCHQ_FREE_ALL;
1285 return otx2_mbox_process(mbox);
1292 nix_tm_copy_rsp_to_dev(struct otx2_eth_dev *dev,
1293 struct nix_txsch_alloc_rsp *rsp)
1298 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1299 for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) {
1300 dev->txschq_list[lvl][schq] = rsp->schq_list[lvl][schq];
1301 dev->txschq_contig_list[lvl][schq] =
1302 rsp->schq_contig_list[lvl][schq];
1305 dev->txschq[lvl] = rsp->schq[lvl];
1306 dev->txschq_contig[lvl] = rsp->schq_contig[lvl];
1312 nix_tm_assign_id_to_node(struct otx2_eth_dev *dev,
1313 struct otx2_nix_tm_node *child,
1314 struct otx2_nix_tm_node *parent)
1316 uint32_t hw_id, schq_con_index, prio_offset;
1317 uint32_t l_id, schq_index;
1319 otx2_tm_dbg("Assign hw id for child node %s lvl %u id %u (%p)",
1320 nix_hwlvl2str(child->hw_lvl), child->lvl, child->id, child);
1322 child->flags |= NIX_TM_NODE_HWRES;
1324 /* Process root nodes */
1325 if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
1326 child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
1328 uint32_t tschq_con_index;
1330 l_id = child->hw_lvl;
1331 tschq_con_index = dev->txschq_contig_index[l_id];
1332 hw_id = dev->txschq_contig_list[l_id][tschq_con_index];
1333 child->hw_id = hw_id;
1334 dev->txschq_contig_index[l_id]++;
1335 /* Update TL1 hw_id for its parent for config purpose */
1336 idx = dev->txschq_index[NIX_TXSCH_LVL_TL1]++;
1337 hw_id = dev->txschq_list[NIX_TXSCH_LVL_TL1][idx];
1338 child->parent_hw_id = hw_id;
1341 if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL1 &&
1342 child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
1343 uint32_t tschq_con_index;
1345 l_id = child->hw_lvl;
1346 tschq_con_index = dev->txschq_index[l_id];
1347 hw_id = dev->txschq_list[l_id][tschq_con_index];
1348 child->hw_id = hw_id;
1349 dev->txschq_index[l_id]++;
1353 /* Process children with parents */
1354 l_id = child->hw_lvl;
1355 schq_index = dev->txschq_index[l_id];
1356 schq_con_index = dev->txschq_contig_index[l_id];
1358 if (child->priority == parent->rr_prio) {
1359 hw_id = dev->txschq_list[l_id][schq_index];
1360 child->hw_id = hw_id;
1361 child->parent_hw_id = parent->hw_id;
1362 dev->txschq_index[l_id]++;
1364 prio_offset = schq_con_index + child->priority;
1365 hw_id = dev->txschq_contig_list[l_id][prio_offset];
1366 child->hw_id = hw_id;
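/*
 * Worked example (illustrative): for a parent whose children use static
 * priorities 0..2 plus one RR group, each RR child is pulled one by one
 * from txschq_list[] (schq_index advances per child), while each SP child
 * indexes the contiguous txschq_contig_list[] at schq_con_index + priority,
 * so siblings with consecutive priorities land on consecutive hw ids,
 * which is what find_prio_anchor() relies on.
 */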
1372 nix_tm_assign_hw_id(struct otx2_eth_dev *dev)
1374 struct otx2_nix_tm_node *parent, *child;
1375 uint32_t child_hw_lvl, con_index_inc, i;
1377 for (i = NIX_TXSCH_LVL_TL1; i > 0; i--) {
1378 TAILQ_FOREACH(parent, &dev->node_list, node) {
1379 child_hw_lvl = parent->hw_lvl - 1;
1380 if (parent->hw_lvl != i)
1382 TAILQ_FOREACH(child, &dev->node_list, node) {
1385 if (child->parent->id != parent->id)
1387 nix_tm_assign_id_to_node(dev, child, parent);
1390 con_index_inc = parent->max_prio + 1;
1391 dev->txschq_contig_index[child_hw_lvl] += con_index_inc;
1394 * Explicitly assign id to parent node if it
1395 * doesn't have a parent
1397 if (parent->hw_lvl == dev->otx2_tm_root_lvl)
1398 nix_tm_assign_id_to_node(dev, parent, NULL);
1405 nix_tm_count_req_schq(struct otx2_eth_dev *dev,
1406 struct nix_txsch_alloc_req *req, uint8_t lvl)
1408 struct otx2_nix_tm_node *tm_node;
1409 uint8_t contig_count;
1411 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1412 if (lvl == tm_node->hw_lvl) {
1413 req->schq[lvl - 1] += tm_node->rr_num;
1414 if (tm_node->max_prio != UINT32_MAX) {
1415 contig_count = tm_node->max_prio + 1;
1416 req->schq_contig[lvl - 1] += contig_count;
1419 if (lvl == dev->otx2_tm_root_lvl &&
1420 dev->otx2_tm_root_lvl && lvl == NIX_TXSCH_LVL_TL2 &&
1421 tm_node->hw_lvl == dev->otx2_tm_root_lvl) {
1422 req->schq_contig[dev->otx2_tm_root_lvl]++;
1426 req->schq[NIX_TXSCH_LVL_TL1] = 1;
1427 req->schq_contig[NIX_TXSCH_LVL_TL1] = 0;
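/*
 * Worked example (illustrative): a TL3 node with rr_num = 3 and
 * max_prio = 2 adds 3 discrete TL4 schqs for its RR group plus a
 * contiguous block of 3 TL4 schqs for priorities 0..2; the totals are
 * accumulated per level before the txsch alloc mailbox is sent.
 */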
1433 nix_tm_prepare_txschq_req(struct otx2_eth_dev *dev,
1434 struct nix_txsch_alloc_req *req)
1438 for (i = NIX_TXSCH_LVL_TL1; i > 0; i--)
1439 nix_tm_count_req_schq(dev, req, i);
1441 for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
1442 dev->txschq_index[i] = 0;
1443 dev->txschq_contig_index[i] = 0;
1449 nix_tm_send_txsch_alloc_msg(struct otx2_eth_dev *dev)
1451 struct otx2_mbox *mbox = dev->mbox;
1452 struct nix_txsch_alloc_req *req;
1453 struct nix_txsch_alloc_rsp *rsp;
1456 req = otx2_mbox_alloc_msg_nix_txsch_alloc(mbox);
1458 rc = nix_tm_prepare_txschq_req(dev, req);
1462 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1466 nix_tm_copy_rsp_to_dev(dev, rsp);
1467 dev->link_cfg_lvl = rsp->link_cfg_lvl;
1469 nix_tm_assign_hw_id(dev);
1474 nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)
1476 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1477 struct otx2_nix_tm_node *tm_node;
1478 struct otx2_eth_txq *txq;
1482 nix_tm_update_parent_info(dev);
1484 rc = nix_tm_send_txsch_alloc_msg(dev);
1486 otx2_err("TM failed to alloc tm resources=%d", rc);
1490 rc = nix_tm_txsch_reg_config(dev);
1492 otx2_err("TM failed to configure sched registers=%d", rc);
1496 /* Trigger MTU recalculate as SMQ needs MTU conf */
1497 if (eth_dev->data->dev_started && eth_dev->data->nb_rx_queues) {
1498 rc = otx2_nix_recalc_mtu(eth_dev);
1500 otx2_err("TM MTU update failed, rc=%d", rc);
1505 /* Mark all non-leaf nodes as enabled */
1506 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1507 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1508 tm_node->flags |= NIX_TM_NODE_ENABLED;
1514 /* Update SQ Sched Data while SQ is idle */
1515 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1516 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1519 rc = nix_sq_sched_data(dev, tm_node, false);
1521 otx2_err("SQ %u sched update failed, rc=%d",
1527 /* Finally XON all SMQ's */
1528 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1529 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1532 rc = nix_smq_xoff(dev, tm_node, false);
1534 otx2_err("Failed to enable smq %u, rc=%d",
1535 tm_node->hw_id, rc);
1540 /* Enable xmit as all the topology is ready */
1541 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1542 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1546 txq = eth_dev->data->tx_queues[sq];
1548 rc = otx2_nix_sq_enable(txq);
1550 otx2_err("TM sw xon failed on SQ %u, rc=%d",
1554 tm_node->flags |= NIX_TM_NODE_ENABLED;
1561 send_tm_reqval(struct otx2_mbox *mbox,
1562 struct nix_txschq_config *req,
1563 struct rte_tm_error *error)
1567 if (!req->num_regs ||
1568 req->num_regs > MAX_REGS_PER_MBOX_MSG) {
1569 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1570 error->message = "invalid config";
1574 rc = otx2_mbox_process(mbox);
1576 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1577 error->message = "unexpected fatal error";
1583 nix_tm_lvl2nix(struct otx2_eth_dev *dev, uint32_t lvl)
1585 if (nix_tm_have_tl1_access(dev)) {
1587 case OTX2_TM_LVL_ROOT:
1588 return NIX_TXSCH_LVL_TL1;
1589 case OTX2_TM_LVL_SCH1:
1590 return NIX_TXSCH_LVL_TL2;
1591 case OTX2_TM_LVL_SCH2:
1592 return NIX_TXSCH_LVL_TL3;
1593 case OTX2_TM_LVL_SCH3:
1594 return NIX_TXSCH_LVL_TL4;
1595 case OTX2_TM_LVL_SCH4:
1596 return NIX_TXSCH_LVL_SMQ;
1598 return NIX_TXSCH_LVL_CNT;
1602 case OTX2_TM_LVL_ROOT:
1603 return NIX_TXSCH_LVL_TL2;
1604 case OTX2_TM_LVL_SCH1:
1605 return NIX_TXSCH_LVL_TL3;
1606 case OTX2_TM_LVL_SCH2:
1607 return NIX_TXSCH_LVL_TL4;
1608 case OTX2_TM_LVL_SCH3:
1609 return NIX_TXSCH_LVL_SMQ;
1611 return NIX_TXSCH_LVL_CNT;
1617 nix_max_prio(struct otx2_eth_dev *dev, uint16_t hw_lvl)
1619 if (hw_lvl >= NIX_TXSCH_LVL_CNT)
1622 /* MDQ doesn't support SP */
1623 if (hw_lvl == NIX_TXSCH_LVL_MDQ)
1626 /* PF's TL1 with VF's enabled doesn't support SP */
1627 if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
1628 (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
1629 (dev->tm_flags & NIX_TM_TL1_NO_SP)))
1632 return TXSCH_TLX_SP_PRIO_MAX - 1;
1637 validate_prio(struct otx2_eth_dev *dev, uint32_t lvl,
1638 uint32_t parent_id, uint32_t priority,
1639 struct rte_tm_error *error)
1641 uint8_t priorities[TXSCH_TLX_SP_PRIO_MAX];
1642 struct otx2_nix_tm_node *tm_node;
1643 uint32_t rr_num = 0;
1646 /* Validate priority against max */
1647 if (priority > nix_max_prio(dev, nix_tm_lvl2nix(dev, lvl - 1))) {
1648 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
1649 error->message = "unsupported priority value";
1653 if (parent_id == RTE_TM_NODE_ID_NULL)
1656 memset(priorities, 0, TXSCH_TLX_SP_PRIO_MAX);
1657 priorities[priority] = 1;
1659 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1660 if (!tm_node->parent)
1663 if (!(tm_node->flags & NIX_TM_NODE_USER))
1666 if (tm_node->parent->id != parent_id)
1669 priorities[tm_node->priority]++;
1672 for (i = 0; i < TXSCH_TLX_SP_PRIO_MAX; i++)
1673 if (priorities[i] > 1)
1676 /* At most one RR group is allowed per parent */
1678 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
1679 error->message = "multiple DWRR node priority";
1683 /* Check for previous priority to avoid holes in priorities */
1684 if (priority && !priorities[priority - 1]) {
1685 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
1686 error->message = "priority not in order";
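/*
 * Worked examples (illustrative): sibling priorities {0, 1, 1, 2} pass,
 * as there is a single DWRR group at priority 1; {0, 0, 1, 1} fail with
 * "multiple DWRR node priority" since only one RR group per parent is
 * supported; {0, 2} fail with "priority not in order" because priority 1
 * is missing.
 */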
1694 read_tm_reg(struct otx2_mbox *mbox, uint64_t reg,
1695 uint64_t *regval, uint32_t hw_lvl)
1697 volatile struct nix_txschq_config *req;
1698 struct nix_txschq_config *rsp;
1701 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
1707 rc = otx2_mbox_process_msg(mbox, (void **)&rsp);
1710 *regval = rsp->regval[0];
1714 /* Search for min rate in topology */
1716 nix_tm_shaper_profile_update_min(struct otx2_eth_dev *dev)
1718 struct otx2_nix_tm_shaper_profile *profile;
1719 uint64_t rate_min = 1E9; /* 1 Gbps */
1721 TAILQ_FOREACH(profile, &dev->shaper_profile_list, shaper) {
1722 if (profile->params.peak.rate &&
1723 profile->params.peak.rate < rate_min)
1724 rate_min = profile->params.peak.rate;
1726 if (profile->params.committed.rate &&
1727 profile->params.committed.rate < rate_min)
1728 rate_min = profile->params.committed.rate;
1731 dev->tm_rate_min = rate_min;
1735 nix_xmit_disable(struct rte_eth_dev *eth_dev)
1737 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1738 uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
1739 uint16_t sqb_cnt, head_off, tail_off;
1740 struct otx2_nix_tm_node *tm_node;
1741 struct otx2_eth_txq *txq;
1742 uint64_t wdata, val;
1745 otx2_tm_dbg("Disabling xmit on %s", eth_dev->data->name);
1747 /* Enable CGX RXTX to drain pkts */
1748 if (!eth_dev->data->dev_started) {
1749 otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
1750 rc = otx2_mbox_process(dev->mbox);
1756 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1757 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1759 if (!(tm_node->flags & NIX_TM_NODE_HWRES))
1762 rc = nix_smq_xoff(dev, tm_node, false);
1764 otx2_err("Failed to enable smq %u, rc=%d",
1765 tm_node->hw_id, rc);
1770 /* Flush all tx queues */
1771 for (i = 0; i < sq_cnt; i++) {
1772 txq = eth_dev->data->tx_queues[i];
1774 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
1776 otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
1780 /* Wait for sq entries to be flushed */
1781 rc = nix_txq_flush_sq_spin(txq);
1783 otx2_err("Failed to drain sq, rc=%d\n", rc);
1788 /* XOFF & flush all SMQs. The HRM mandates that
1789  * all SQs be empty before an SMQ flush is issued.
1791 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1792 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1794 if (!(tm_node->flags & NIX_TM_NODE_HWRES))
1797 rc = nix_smq_xoff(dev, tm_node, true);
1799 otx2_err("Failed to enable smq %u, rc=%d",
1800 tm_node->hw_id, rc);
1805 /* Verify sanity of all tx queues */
1806 for (i = 0; i < sq_cnt; i++) {
1807 txq = eth_dev->data->tx_queues[i];
1809 wdata = ((uint64_t)txq->sq << 32);
1810 val = otx2_atomic64_add_nosync(wdata,
1811 (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS));
1813 sqb_cnt = val & 0xFFFF;
1814 head_off = (val >> 20) & 0x3F;
1815 tail_off = (val >> 28) & 0x3F;
1817 if (sqb_cnt > 1 || head_off != tail_off ||
1818 (*txq->fc_mem != txq->nb_sqb_bufs))
1819 otx2_err("Failed to gracefully flush sq %u", txq->sq);
1823 /* restore cgx state */
1824 if (!eth_dev->data->dev_started) {
1825 otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
1826 rc |= otx2_mbox_process(dev->mbox);
1833 otx2_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
1834 int *is_leaf, struct rte_tm_error *error)
1836 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1837 struct otx2_nix_tm_node *tm_node;
1839 if (is_leaf == NULL) {
1840 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1844 tm_node = nix_tm_node_search(dev, node_id, true);
1845 if (node_id == RTE_TM_NODE_ID_NULL || !tm_node) {
1846 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
1849 if (nix_tm_is_leaf(dev, tm_node->lvl))
1857 otx2_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
1858 struct rte_tm_capabilities *cap,
1859 struct rte_tm_error *error)
1861 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1862 struct otx2_mbox *mbox = dev->mbox;
1863 int rc, max_nr_nodes = 0, i;
1864 struct free_rsrcs_rsp *rsp;
1866 memset(cap, 0, sizeof(*cap));
1868 otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
1869 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1871 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1872 error->message = "unexpected fatal error";
1876 for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
1877 max_nr_nodes += rsp->schq[i];
1879 cap->n_nodes_max = max_nr_nodes + dev->tm_leaf_cnt;
1880 /* TL1 level is reserved for PF */
1881 cap->n_levels_max = nix_tm_have_tl1_access(dev) ?
1882 OTX2_TM_LVL_MAX : OTX2_TM_LVL_MAX - 1;
1883 cap->non_leaf_nodes_identical = 1;
1884 cap->leaf_nodes_identical = 1;
1886 /* Shaper Capabilities */
1887 cap->shaper_private_n_max = max_nr_nodes;
1888 cap->shaper_n_max = max_nr_nodes;
1889 cap->shaper_private_dual_rate_n_max = max_nr_nodes;
1890 cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1891 cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1892 cap->shaper_pkt_length_adjust_min = 0;
1893 cap->shaper_pkt_length_adjust_max = 0;
1895 /* Schedule Capabilities */
1896 cap->sched_n_children_max = rsp->schq[NIX_TXSCH_LVL_MDQ];
1897 cap->sched_sp_n_priorities_max = TXSCH_TLX_SP_PRIO_MAX;
1898 cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
1899 cap->sched_wfq_n_groups_max = 1;
1900 cap->sched_wfq_weight_max = MAX_SCHED_WEIGHT;
1902 cap->dynamic_update_mask =
1903 RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
1904 RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
1906 RTE_TM_STATS_N_PKTS |
1907 RTE_TM_STATS_N_BYTES |
1908 RTE_TM_STATS_N_PKTS_RED_DROPPED |
1909 RTE_TM_STATS_N_BYTES_RED_DROPPED;
1911 for (i = 0; i < RTE_COLORS; i++) {
1912 cap->mark_vlan_dei_supported[i] = false;
1913 cap->mark_ip_ecn_tcp_supported[i] = false;
1914 cap->mark_ip_dscp_supported[i] = false;
1921 otx2_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
1922 struct rte_tm_level_capabilities *cap,
1923 struct rte_tm_error *error)
1925 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1926 struct otx2_mbox *mbox = dev->mbox;
1927 struct free_rsrcs_rsp *rsp;
1931 memset(cap, 0, sizeof(*cap));
1933 otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
1934 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1936 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1937 error->message = "unexpected fatal error";
1941 hw_lvl = nix_tm_lvl2nix(dev, lvl);
1943 if (nix_tm_is_leaf(dev, lvl)) {
1945 cap->n_nodes_max = dev->tm_leaf_cnt;
1946 cap->n_nodes_leaf_max = dev->tm_leaf_cnt;
1947 cap->leaf_nodes_identical = 1;
1948 cap->leaf.stats_mask =
1949 RTE_TM_STATS_N_PKTS |
1950 RTE_TM_STATS_N_BYTES;
1952 } else if (lvl == OTX2_TM_LVL_ROOT) {
1953 /* Root node, aka TL2(vf)/TL1(pf) */
1954 cap->n_nodes_max = 1;
1955 cap->n_nodes_nonleaf_max = 1;
1956 cap->non_leaf_nodes_identical = 1;
1958 cap->nonleaf.shaper_private_supported = true;
1959 cap->nonleaf.shaper_private_dual_rate_supported =
1960 nix_tm_have_tl1_access(dev) ? false : true;
1961 cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1962 cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1964 cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
1965 cap->nonleaf.sched_sp_n_priorities_max =
1966 nix_max_prio(dev, hw_lvl) + 1;
1967 cap->nonleaf.sched_wfq_n_groups_max = 1;
1968 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
1970 if (nix_tm_have_tl1_access(dev))
1971 cap->nonleaf.stats_mask =
1972 RTE_TM_STATS_N_PKTS_RED_DROPPED |
1973 RTE_TM_STATS_N_BYTES_RED_DROPPED;
1974 } else if ((lvl < OTX2_TM_LVL_MAX) &&
1975 (hw_lvl < NIX_TXSCH_LVL_CNT)) {
1976 /* TL2, TL3, TL4, MDQ */
1977 cap->n_nodes_max = rsp->schq[hw_lvl];
1978 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
1979 cap->non_leaf_nodes_identical = 1;
1981 cap->nonleaf.shaper_private_supported = true;
1982 cap->nonleaf.shaper_private_dual_rate_supported = true;
1983 cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1984 cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1986 /* MDQ doesn't support Strict Priority */
1987 if (hw_lvl == NIX_TXSCH_LVL_MDQ)
1988 cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
1990 cap->nonleaf.sched_n_children_max =
1991 rsp->schq[hw_lvl - 1];
1992 cap->nonleaf.sched_sp_n_priorities_max =
1993 nix_max_prio(dev, hw_lvl) + 1;
1994 cap->nonleaf.sched_wfq_n_groups_max = 1;
1995 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
1997 /* unsupported level */
1998 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2005 otx2_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
2006 struct rte_tm_node_capabilities *cap,
2007 struct rte_tm_error *error)
2009 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2010 struct otx2_mbox *mbox = dev->mbox;
2011 struct otx2_nix_tm_node *tm_node;
2012 struct free_rsrcs_rsp *rsp;
2013 int rc, hw_lvl, lvl;
2015 memset(cap, 0, sizeof(*cap));
2017 tm_node = nix_tm_node_search(dev, node_id, true);
2019 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2020 error->message = "no such node";
2024 hw_lvl = tm_node->hw_lvl;
2028 if (nix_tm_is_leaf(dev, lvl)) {
2029 cap->stats_mask = RTE_TM_STATS_N_PKTS |
2030 RTE_TM_STATS_N_BYTES;
2034 otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
2035 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
2037 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2038 error->message = "unexpected fatal error";
2042 /* Non Leaf Shaper */
2043 cap->shaper_private_supported = true;
2044 cap->shaper_private_dual_rate_supported =
2045 (hw_lvl == NIX_TXSCH_LVL_TL1) ? false : true;
2046 cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
2047 cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
2049 /* Non Leaf Scheduler */
2050 if (hw_lvl == NIX_TXSCH_LVL_MDQ)
2051 cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
2053 cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
2055 cap->nonleaf.sched_sp_n_priorities_max = nix_max_prio(dev, hw_lvl) + 1;
2056 cap->nonleaf.sched_wfq_n_children_per_group_max =
2057 cap->nonleaf.sched_n_children_max;
2058 cap->nonleaf.sched_wfq_n_groups_max = 1;
2059 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
2061 if (hw_lvl == NIX_TXSCH_LVL_TL1)
2062 cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
2063 RTE_TM_STATS_N_BYTES_RED_DROPPED;
2068 otx2_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev,
2069 uint32_t profile_id,
2070 struct rte_tm_shaper_params *params,
2071 struct rte_tm_error *error)
2073 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2074 struct otx2_nix_tm_shaper_profile *profile;
2076 profile = nix_tm_shaper_profile_search(dev, profile_id);
2078 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2079 error->message = "shaper profile ID already exists";
2083 /* Committed rate and burst size can be enabled/disabled */
2084 if (params->committed.size || params->committed.rate) {
2085 if (params->committed.size < MIN_SHAPER_BURST ||
2086 params->committed.size > MAX_SHAPER_BURST) {
2088 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
2090 } else if (!shaper_rate_to_nix(params->committed.rate * 8,
2091 NULL, NULL, NULL)) {
2093 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
2094 error->message = "shaper committed rate invalid";
2099 /* Peak rate and burst size can be enabled/disabled */
2100 if (params->peak.size || params->peak.rate) {
2101 if (params->peak.size < MIN_SHAPER_BURST ||
2102 params->peak.size > MAX_SHAPER_BURST) {
2104 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
2106 } else if (!shaper_rate_to_nix(params->peak.rate * 8,
2107 NULL, NULL, NULL)) {
2109 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
2110 error->message = "shaper peak rate invalid";
2115 profile = rte_zmalloc("otx2_nix_tm_shaper_profile",
2116 sizeof(struct otx2_nix_tm_shaper_profile), 0);
2120 profile->shaper_profile_id = profile_id;
2121 rte_memcpy(&profile->params, params,
2122 sizeof(struct rte_tm_shaper_params));
2123 TAILQ_INSERT_TAIL(&dev->shaper_profile_list, profile, shaper);
2125 otx2_tm_dbg("Added TM shaper profile %u, "
2126 " pir %" PRIu64 " , pbs %" PRIu64 ", cir %" PRIu64
2127 ", cbs %" PRIu64 " , adj %u",
2129 params->peak.rate * 8,
2131 params->committed.rate * 8,
2132 params->committed.size,
2133 params->pkt_length_adjust);
2135 /* Translate rates to bits per second */
2136 profile->params.peak.rate = profile->params.peak.rate * 8;
2137 profile->params.committed.rate = profile->params.committed.rate * 8;
2138 /* Always use PIR for single rate shaping */
2139 if (!params->peak.rate && params->committed.rate) {
2140 profile->params.peak = profile->params.committed;
2141 memset(&profile->params.committed, 0,
2142 sizeof(profile->params.committed));
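/*
 * Note: rte_tm expresses rates in bytes per second while the NIX shaper
 * math above works in bits per second, hence the "* 8" here and in the
 * validation calls; e.g. a committed.rate of 12500000 B/s is programmed
 * as 1E8 bps. When only a committed rate is given it is moved into the
 * peak fields, since single rate shaping is done on the PIR.
 */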
2145 /* update min rate */
2146 nix_tm_shaper_profile_update_min(dev);
2151 otx2_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
2152 uint32_t profile_id,
2153 struct rte_tm_error *error)
2155 struct otx2_nix_tm_shaper_profile *profile;
2156 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2158 profile = nix_tm_shaper_profile_search(dev, profile_id);
2161 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2162 error->message = "shaper profile ID does not exist";
2166 if (profile->reference_count) {
2167 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
2168 error->message = "shaper profile in use";
2172 otx2_tm_dbg("Removing TM shaper profile %u", profile_id);
2173 TAILQ_REMOVE(&dev->shaper_profile_list, profile, shaper);
2176 /* update min rate */
2177 nix_tm_shaper_profile_update_min(dev);
2182 otx2_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
2183 uint32_t parent_node_id, uint32_t priority,
2184 uint32_t weight, uint32_t lvl,
2185 struct rte_tm_node_params *params,
2186 struct rte_tm_error *error)
2188 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2189 struct otx2_nix_tm_node *parent_node;
2190 int rc, clear_on_fail = 0;
2191 uint32_t exp_next_lvl;
2194 /* we don't support dynamic updates */
2195 if (dev->tm_flags & NIX_TM_COMMITTED) {
2196 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2197 error->message = "dynamic update not supported";
2201 /* Leaf nodes must all use priority 0 */
2202 if (nix_tm_is_leaf(dev, lvl) && priority != 0) {
2203 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2204 error->message = "queue shapers must be priority 0";
2208 parent_node = nix_tm_node_search(dev, parent_node_id, true);
2210 /* find the right level */
2211 if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
2212 if (parent_node_id == RTE_TM_NODE_ID_NULL) {
2213 lvl = OTX2_TM_LVL_ROOT;
2214 } else if (parent_node) {
2215 lvl = parent_node->lvl + 1;
2217 /* Neither a proper parent nor a proper level id was given */
2218 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2219 error->message = "invalid parent node id";
2224 /* Translate rte_tm level id's to nix hw level id's */
2225 hw_lvl = nix_tm_lvl2nix(dev, lvl);
2226 if (hw_lvl == NIX_TXSCH_LVL_CNT &&
2227 !nix_tm_is_leaf(dev, lvl)) {
2228 error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
2229 error->message = "invalid level id";
2233 if (node_id < dev->tm_leaf_cnt)
2234 exp_next_lvl = NIX_TXSCH_LVL_SMQ;
2236 exp_next_lvl = hw_lvl + 1;
2238 /* Check if there is no parent node yet */
2239 if (hw_lvl != dev->otx2_tm_root_lvl &&
2240 (!parent_node || parent_node->hw_lvl != exp_next_lvl)) {
2241 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2242 error->message = "invalid parent node id";
2246 /* Check if a node already exists */
2247 if (nix_tm_node_search(dev, node_id, true)) {
2248 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2249 error->message = "node already exists";
2253 /* Check if shaper profile exists for non leaf node */
2254 if (!nix_tm_is_leaf(dev, lvl) &&
2255 params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE &&
2256 !nix_tm_shaper_profile_search(dev, params->shaper_profile_id)) {
2257 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2258 error->message = "invalid shaper profile";
2262 /* Check for a second DWRR group among siblings, or holes in the priorities */
2263 if (validate_prio(dev, lvl, parent_node_id, priority, error))
2266 if (weight > MAX_SCHED_WEIGHT) {
2267 error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
2268 error->message = "max weight exceeded";
2272 rc = nix_tm_node_add_to_list(dev, node_id, parent_node_id,
2273 priority, weight, hw_lvl,
2276 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2277 /* cleanup user added nodes */
2279 nix_tm_free_resources(dev, NIX_TM_NODE_USER,
2280 NIX_TM_NODE_USER, false);
2281 error->message = "failed to add node";
2284 error->type = RTE_TM_ERROR_TYPE_NONE;
2289 otx2_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id,
2290 struct rte_tm_error *error)
2292 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2293 struct otx2_nix_tm_node *tm_node, *child_node;
2294 struct otx2_nix_tm_shaper_profile *profile;
2295 uint32_t profile_id;
2297 /* we don't support dynamic updates yet */
2298 if (dev->tm_flags & NIX_TM_COMMITTED) {
2299 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2300 error->message = "hierarchy exists";
2304 if (node_id == RTE_TM_NODE_ID_NULL) {
2305 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2306 error->message = "invalid node id";
2310 tm_node = nix_tm_node_search(dev, node_id, true);
2312 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2313 error->message = "no such node";
2317 /* Check for any existing children */
2318 TAILQ_FOREACH(child_node, &dev->node_list, node) {
2319 if (child_node->parent == tm_node) {
2320 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2321 error->message = "children exist";
2326 /* Remove shaper profile reference */
2327 profile_id = tm_node->params.shaper_profile_id;
2328 profile = nix_tm_shaper_profile_search(dev, profile_id);
2329 profile->reference_count--;
2331 TAILQ_REMOVE(&dev->node_list, tm_node, node);
2337 nix_tm_node_suspend_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
2338 struct rte_tm_error *error, bool suspend)
2340 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2341 struct otx2_mbox *mbox = dev->mbox;
2342 struct otx2_nix_tm_node *tm_node;
2343 struct nix_txschq_config *req;
2347 tm_node = nix_tm_node_search(dev, node_id, true);
2349 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2350 error->message = "no such node";
2354 if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
2355 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2356 error->message = "hierarchy doesn't exist";
2360 flags = tm_node->flags;
2361 flags = suspend ? (flags & ~NIX_TM_NODE_ENABLED) :
2362 (flags | NIX_TM_NODE_ENABLED);
2364 if (tm_node->flags == flags)
2367 /* send mbox for state change */
2368 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2370 req->lvl = tm_node->hw_lvl;
2371 req->num_regs = prepare_tm_sw_xoff(tm_node, suspend,
2372 req->reg, req->regval);
2373 rc = send_tm_reqval(mbox, req, error);
2375 tm_node->flags = flags;
2380 otx2_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id,
2381 struct rte_tm_error *error)
2383 return nix_tm_node_suspend_resume(eth_dev, node_id, error, true);
2387 otx2_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
2388 struct rte_tm_error *error)
2390 return nix_tm_node_suspend_resume(eth_dev, node_id, error, false);
2394 otx2_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
2396 struct rte_tm_error *error)
2398 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2399 struct otx2_nix_tm_node *tm_node;
2400 uint32_t leaf_cnt = 0;
2403 if (dev->tm_flags & NIX_TM_COMMITTED) {
2404 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2405 error->message = "hierarchy exists";
2409 /* Check if we have all the leaf nodes */
2410 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
2411 if (tm_node->flags & NIX_TM_NODE_USER &&
2412 tm_node->id < dev->tm_leaf_cnt)
2416 if (leaf_cnt != dev->tm_leaf_cnt) {
2417 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2418 error->message = "incomplete hierarchy";
2423  * Disable xmit; it will be re-enabled once the
2424  * new topology is in place.
2426 rc = nix_xmit_disable(eth_dev);
2428 otx2_err("failed to disable TX, rc=%d", rc);
2432 /* Delete default/ratelimit tree */
2433 if (dev->tm_flags & (NIX_TM_DEFAULT_TREE | NIX_TM_RATE_LIMIT_TREE)) {
2434 rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false);
2436 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2437 error->message = "failed to free default resources";
2440 dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE |
2441 NIX_TM_RATE_LIMIT_TREE);
2444 /* Free up user alloc'ed resources */
2445 rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER,
2446 NIX_TM_NODE_USER, true);
2448 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2449 error->message = "failed to free user resources";
2453 rc = nix_tm_alloc_resources(eth_dev, true);
2455 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2456 error->message = "alloc resources failed";
2457 /* TODO should we restore default config ? */
2459 nix_tm_free_resources(dev, 0, 0, false);
2463 error->type = RTE_TM_ERROR_TYPE_NONE;
2464 dev->tm_flags |= NIX_TM_COMMITTED;
2469 otx2_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev,
2471 uint32_t profile_id,
2472 struct rte_tm_error *error)
2474 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2475 struct otx2_nix_tm_shaper_profile *profile = NULL;
2476 struct otx2_mbox *mbox = dev->mbox;
2477 struct otx2_nix_tm_node *tm_node;
2478 struct nix_txschq_config *req;
2482 tm_node = nix_tm_node_search(dev, node_id, true);
2483 if (!tm_node || nix_tm_is_leaf(dev, tm_node->lvl)) {
2484 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2485 error->message = "invalid node";
2489 if (profile_id == tm_node->params.shaper_profile_id)
2492 if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
2493 profile = nix_tm_shaper_profile_search(dev, profile_id);
2495 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2496 error->message = "shaper profile ID does not exist";
2501 tm_node->params.shaper_profile_id = profile_id;
2503 /* Nothing to do if not yet committed */
2504 if (!(dev->tm_flags & NIX_TM_COMMITTED))
2507 tm_node->flags &= ~NIX_TM_NODE_ENABLED;
2509 /* Flush the specific node with SW_XOFF */
2510 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2511 req->lvl = tm_node->hw_lvl;
2512 k = prepare_tm_sw_xoff(tm_node, true, req->reg, req->regval);
2515 rc = send_tm_reqval(mbox, req, error);
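/* Select the default RED algorithm for this node based on the new
 * shaper profile before reprogramming PIR/CIR.
 */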
2519 shaper_default_red_algo(dev, tm_node, profile);
2521 /* Update the PIR/CIR and clear SW XOFF */
2522 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2523 req->lvl = tm_node->hw_lvl;
2525 k = prepare_tm_shaper_reg(tm_node, profile, req->reg, req->regval);
2527 k += prepare_tm_sw_xoff(tm_node, false, &req->reg[k], &req->regval[k]);
2530 rc = send_tm_reqval(mbox, req, error);
2532 tm_node->flags |= NIX_TM_NODE_ENABLED;
2537 otx2_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev,
2538 uint32_t node_id, uint32_t new_parent_id,
2539 uint32_t priority, uint32_t weight,
2540 struct rte_tm_error *error)
2542 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2543 struct otx2_nix_tm_node *tm_node, *sibling;
2544 struct otx2_nix_tm_node *new_parent;
2545 struct nix_txschq_config *req;
2549 if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
2550 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2551 error->message = "hierarchy doesn't exist";
2555 tm_node = nix_tm_node_search(dev, node_id, true);
2557 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2558 error->message = "no such node";
2562 /* Parent ID is valid only for non-root nodes */
2563 if (tm_node->hw_lvl != dev->otx2_tm_root_lvl) {
2564 new_parent = nix_tm_node_search(dev, new_parent_id, true);
2566 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2567 error->message = "no such parent node";
2571 /* Current support is only for dynamic weight update */
2572 if (tm_node->parent != new_parent ||
2573 tm_node->priority != priority) {
2574 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2575 error->message = "only weight update supported";
2580 /* Skip if no change */
2581 if (tm_node->weight == weight)
2584 tm_node->weight = weight;
2586 /* For leaf nodes, SQ CTX needs update */
2587 if (nix_tm_is_leaf(dev, tm_node->lvl)) {
2588 /* Update SQ quantum data on the fly */
2589 rc = nix_sq_sched_data(dev, tm_node, true);
2591 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2592 error->message = "sq sched data update failed";
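/* The scheduler-level weight update below is done under software
 * XOFF: the parent and all siblings are paused, this node's weight
 * register is rewritten, then everything is resumed in reverse order.
 */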
2596 /* XOFF Parent node */
2597 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2598 req->lvl = tm_node->parent->hw_lvl;
2599 req->num_regs = prepare_tm_sw_xoff(tm_node->parent, true,
2600 req->reg, req->regval);
2601 rc = send_tm_reqval(dev->mbox, req, error);
2605 /* XOFF this node and all other siblings */
2606 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2607 req->lvl = tm_node->hw_lvl;
2610 TAILQ_FOREACH(sibling, &dev->node_list, node) {
2611 if (sibling->parent != tm_node->parent)
2613 k += prepare_tm_sw_xoff(sibling, true, &req->reg[k],
2617 rc = send_tm_reqval(dev->mbox, req, error);
2621 /* Update new weight for current node */
2622 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2623 req->lvl = tm_node->hw_lvl;
2624 req->num_regs = prepare_tm_sched_reg(dev, tm_node,
2625 req->reg, req->regval);
2626 rc = send_tm_reqval(dev->mbox, req, error);
2630 /* XON this node and all other siblings */
2631 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2632 req->lvl = tm_node->hw_lvl;
2635 TAILQ_FOREACH(sibling, &dev->node_list, node) {
2636 if (sibling->parent != tm_node->parent)
2638 k += prepare_tm_sw_xoff(sibling, false, &req->reg[k],
2642 rc = send_tm_reqval(dev->mbox, req, error);
2646 /* XON Parent node */
2647 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2648 req->lvl = tm_node->parent->hw_lvl;
2649 req->num_regs = prepare_tm_sw_xoff(tm_node->parent, false,
2650 req->reg, req->regval);
2651 rc = send_tm_reqval(dev->mbox, req, error);
2659 otx2_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
2660 struct rte_tm_node_stats *stats,
2661 uint64_t *stats_mask, int clear,
2662 struct rte_tm_error *error)
2664 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2665 struct otx2_nix_tm_node *tm_node;
2670 tm_node = nix_tm_node_search(dev, node_id, true);
2672 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2673 error->message = "no such node";
2677 /* Stats are supported only for leaf nodes or the TL1 root */
2678 if (nix_tm_is_leaf(dev, tm_node->lvl)) {
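/* Leaf (SQ) counters are read through the per-LF SQ operation
 * registers: the SQ index goes into the upper 32 bits of the operand
 * and an atomic add-and-fetch returns the running count.
 */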
2679 reg = (((uint64_t)tm_node->id) << 32);
2682 addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);
2683 val = otx2_atomic64_add_nosync(reg, addr);
2686 stats->n_pkts = val - tm_node->last_pkts;
2689 addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_OCTS);
2690 val = otx2_atomic64_add_nosync(reg, addr);
2693 stats->n_bytes = val - tm_node->last_bytes;
2696 tm_node->last_pkts = stats->n_pkts;
2697 tm_node->last_bytes = stats->n_bytes;
2700 *stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
2702 } else if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1) {
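/* For the TL1 root only RED-dropped packet/byte counters are
 * available; they are read from the AF over the mbox.
 */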
2703 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2704 error->message = "stats read error";
2706 /* RED Drop packets */
2707 reg = NIX_AF_TL1X_DROPPED_PACKETS(tm_node->hw_id);
2708 rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
2711 stats->leaf.n_pkts_dropped[RTE_COLOR_RED] =
2712 val - tm_node->last_pkts;
2714 /* RED Drop bytes */
2715 reg = NIX_AF_TL1X_DROPPED_BYTES(tm_node->hw_id);
2716 rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
2719 stats->leaf.n_bytes_dropped[RTE_COLOR_RED] =
2720 val - tm_node->last_bytes;
2724 tm_node->last_pkts =
2725 stats->leaf.n_pkts_dropped[RTE_COLOR_RED];
2726 tm_node->last_bytes =
2727 stats->leaf.n_bytes_dropped[RTE_COLOR_RED];
2730 *stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
2731 RTE_TM_STATS_N_BYTES_RED_DROPPED;
2734 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2735 error->message = "unsupported node";
2743 const struct rte_tm_ops otx2_tm_ops = {
2744 .node_type_get = otx2_nix_tm_node_type_get,
2746 .capabilities_get = otx2_nix_tm_capa_get,
2747 .level_capabilities_get = otx2_nix_tm_level_capa_get,
2748 .node_capabilities_get = otx2_nix_tm_node_capa_get,
2750 .shaper_profile_add = otx2_nix_tm_shaper_profile_add,
2751 .shaper_profile_delete = otx2_nix_tm_shaper_profile_delete,
2753 .node_add = otx2_nix_tm_node_add,
2754 .node_delete = otx2_nix_tm_node_delete,
2755 .node_suspend = otx2_nix_tm_node_suspend,
2756 .node_resume = otx2_nix_tm_node_resume,
2757 .hierarchy_commit = otx2_nix_tm_hierarchy_commit,
2759 .node_shaper_update = otx2_nix_tm_node_shaper_update,
2760 .node_parent_update = otx2_nix_tm_node_parent_update,
2761 .node_stats_read = otx2_nix_tm_node_stats_read,
2765 nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)
2767 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2768 uint32_t def = eth_dev->data->nb_tx_queues;
2769 struct rte_tm_node_params params;
2770 uint32_t leaf_parent, i;
2771 int rc = 0, leaf_level;
2773 /* Default params */
2774 memset(&params, 0, sizeof(params));
2775 params.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
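/* Build one default chain of scheduler nodes from the root level
 * down to the lowest scheduler level and attach one leaf node per
 * Tx queue to its tail. PFs with TL1 access root the chain at TL1
 * (one extra level); VF/LBK devices root at TL2.
 */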
2777 if (nix_tm_have_tl1_access(dev)) {
2778 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
2779 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2782 OTX2_TM_LVL_ROOT, false, &params);
2785 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2788 OTX2_TM_LVL_SCH1, false, &params);
2792 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2795 OTX2_TM_LVL_SCH2, false, &params);
2799 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2802 OTX2_TM_LVL_SCH3, false, &params);
2806 rc = nix_tm_node_add_to_list(dev, def + 4, def + 3, 0,
2809 OTX2_TM_LVL_SCH4, false, &params);
2813 leaf_parent = def + 4;
2814 leaf_level = OTX2_TM_LVL_QUEUE;
2816 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
2817 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2820 OTX2_TM_LVL_ROOT, false, &params);
2824 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2827 OTX2_TM_LVL_SCH1, false, &params);
2831 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2834 OTX2_TM_LVL_SCH2, false, &params);
2838 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2841 OTX2_TM_LVL_SCH3, false, &params);
2845 leaf_parent = def + 3;
2846 leaf_level = OTX2_TM_LVL_SCH4;
2849 /* Add leaf nodes */
2850 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2851 rc = nix_tm_node_add_to_list(dev, i, leaf_parent, 0,
2854 leaf_level, false, &params);
2863 void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev)
2865 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2867 TAILQ_INIT(&dev->node_list);
2868 TAILQ_INIT(&dev->shaper_profile_list);
2869 dev->tm_rate_min = 1E9; /* 1Gbps */
2872 int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
2874 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2875 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2876 uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
2879 /* Free up all resources already held */
2880 rc = nix_tm_free_resources(dev, 0, 0, false);
2882 otx2_err("Failed to free up existing resources, rc=%d", rc);
2886 /* Clear shaper profiles */
2887 nix_tm_clear_shaper_profiles(dev);
2888 dev->tm_flags = NIX_TM_DEFAULT_TREE;
2890 /* Disable TL1 static priority when VFs are enabled,
2891 * as otherwise the VFs' TL2 would have to be reallocated
2892 * at runtime to support a specific PF topology.
2894 if (pci_dev->max_vfs)
2895 dev->tm_flags |= NIX_TM_TL1_NO_SP;
2897 rc = nix_tm_prepare_default_tree(eth_dev);
2901 rc = nix_tm_alloc_resources(eth_dev, false);
2904 dev->tm_leaf_cnt = sq_cnt;
2910 nix_tm_prepare_rate_limited_tree(struct rte_eth_dev *eth_dev)
2912 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2913 uint32_t def = eth_dev->data->nb_tx_queues;
2914 struct rte_tm_node_params params;
2915 uint32_t leaf_parent, i, rc = 0;
2917 memset(&params, 0, sizeof(params));
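/* Same shape as the default tree except that every Tx queue gets
 * its own parent node at the lowest scheduler level, so that a
 * per-queue rate limit can later be programmed on that parent.
 */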
2919 if (nix_tm_have_tl1_access(dev)) {
2920 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
2921 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2924 OTX2_TM_LVL_ROOT, false, &params);
2927 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2930 OTX2_TM_LVL_SCH1, false, &params);
2933 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2936 OTX2_TM_LVL_SCH2, false, &params);
2939 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2942 OTX2_TM_LVL_SCH3, false, &params);
2945 leaf_parent = def + 3;
2947 /* Add per-queue SMQ nodes */
2948 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2949 rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
2951 0, DEFAULT_RR_WEIGHT,
2959 /* Add leaf nodes */
2960 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2961 rc = nix_tm_node_add_to_list(dev, i,
2962 leaf_parent + 1 + i, 0,
2974 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
2975 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2976 DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL2,
2977 OTX2_TM_LVL_ROOT, false, &params);
2980 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2981 DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL3,
2982 OTX2_TM_LVL_SCH1, false, &params);
2985 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2986 DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL4,
2987 OTX2_TM_LVL_SCH2, false, &params);
2990 leaf_parent = def + 2;
2992 /* Add per-queue SMQ nodes */
2993 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2994 rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
2996 0, DEFAULT_RR_WEIGHT,
3004 /* Add leaf nodes */
3005 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
3006 rc = nix_tm_node_add_to_list(dev, i, leaf_parent + 1 + i, 0,
3019 otx2_nix_tm_rate_limit_mdq(struct rte_eth_dev *eth_dev,
3020 struct otx2_nix_tm_node *tm_node,
3023 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3024 struct otx2_nix_tm_shaper_profile profile;
3025 struct otx2_mbox *mbox = dev->mbox;
3026 volatile uint64_t *reg, *regval;
3027 struct nix_txschq_config *req;
3032 flags = tm_node->flags;
3034 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
3035 req->lvl = NIX_TXSCH_LVL_MDQ;
3037 regval = req->regval;
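/* All register updates for this rate limit (SW_XOFF state plus the
 * PIR-only shaper settings) are accumulated in reg/regval and
 * applied in a single mbox transaction below.
 */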
3040 k += prepare_tm_sw_xoff(tm_node, true, &reg[k], &regval[k]);
3041 flags &= ~NIX_TM_NODE_ENABLED;
3045 if (!(flags & NIX_TM_NODE_ENABLED)) {
3046 k += prepare_tm_sw_xoff(tm_node, false, &reg[k], &regval[k]);
3047 flags |= NIX_TM_NODE_ENABLED;
3050 /* Use only PIR for rate limit */
3051 memset(&profile, 0, sizeof(profile));
3052 profile.params.peak.rate = tx_rate;
3053 /* Minimum burst of ~4 us worth of Tx bytes */
3054 profile.params.peak.size = RTE_MAX(NIX_MAX_HW_FRS,
3055 (4ull * tx_rate) / (1E6 * 8));
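/* With tx_rate in bits per second, (4 * tx_rate) / (1E6 * 8) works
 * out to the number of bytes transmitted in roughly 4 us at that rate.
 */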
3056 if (!dev->tm_rate_min || dev->tm_rate_min > tx_rate)
3057 dev->tm_rate_min = tx_rate;
3059 k += prepare_tm_shaper_reg(tm_node, &profile, &reg[k], &regval[k]);
3062 rc = otx2_mbox_process(mbox);
3066 tm_node->flags = flags;
3071 otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
3072 uint16_t queue_idx, uint16_t tx_rate_mbps)
3074 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3075 uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
3076 struct otx2_nix_tm_node *tm_node;
3079 /* Check for supported revisions */
3080 if (otx2_dev_is_95xx_Ax(dev) ||
3081 otx2_dev_is_96xx_Ax(dev))
3084 if (queue_idx >= eth_dev->data->nb_tx_queues)
3087 if (!(dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
3088 !(dev->tm_flags & NIX_TM_RATE_LIMIT_TREE))
3091 if ((dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
3092 eth_dev->data->nb_tx_queues > 1) {
3093 /* For a TM topology change, the ethdev must be stopped */
3094 if (eth_dev->data->dev_started)
3098 * Disable xmit; it will be re-enabled when the
3099 * new topology is in place.
3101 rc = nix_xmit_disable(eth_dev);
3103 otx2_err("failed to disable TX, rc=%d", rc);
3107 rc = nix_tm_free_resources(dev, 0, 0, false);
3109 otx2_tm_dbg("failed to free default resources, rc %d",
3114 rc = nix_tm_prepare_rate_limited_tree(eth_dev);
3116 otx2_tm_dbg("failed to prepare tm tree, rc=%d", rc);
3120 rc = nix_tm_alloc_resources(eth_dev, true);
3122 otx2_tm_dbg("failed to allocate tm tree, rc=%d", rc);
3126 dev->tm_flags &= ~NIX_TM_DEFAULT_TREE;
3127 dev->tm_flags |= NIX_TM_RATE_LIMIT_TREE;
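/* Look up the leaf node for this queue; the rate limit itself is
 * programmed on its parent, the MDQ-level node carrying the shaper.
 */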
3130 tm_node = nix_tm_node_search(dev, queue_idx, false);
3132 /* Check if we found a valid leaf node */
3134 !nix_tm_is_leaf(dev, tm_node->lvl) ||
3136 tm_node->parent->hw_id == UINT32_MAX)
3139 return otx2_nix_tm_rate_limit_mdq(eth_dev, tm_node->parent, tx_rate);
3141 otx2_tm_dbg("Unsupported TM tree 0x%0x", dev->tm_flags);
3146 otx2_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *arg)
3148 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3153 /* Check for supported revisions */
3154 if (otx2_dev_is_95xx_Ax(dev) ||
3155 otx2_dev_is_96xx_Ax(dev))
3158 *(const void **)arg = &otx2_tm_ops;
3164 otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
3166 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3169 /* Xmit is assumed to be disabled */
3170 /* Free up resources already held */
3171 rc = nix_tm_free_resources(dev, 0, 0, false);
3173 otx2_err("Failed to free up existing resources, rc=%d", rc);
3177 /* Clear shaper profiles */
3178 nix_tm_clear_shaper_profiles(dev);
3185 otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
3186 uint32_t *rr_quantum, uint16_t *smq)
3188 struct otx2_nix_tm_node *tm_node;
3191 /* 0..sq_cnt-1 are leaf nodes */
3192 if (sq >= dev->tm_leaf_cnt)
3195 /* Search for internal node first */
3196 tm_node = nix_tm_node_search(dev, sq, false);
3198 tm_node = nix_tm_node_search(dev, sq, true);
3200 /* Check if we found a valid leaf node */
3201 if (!tm_node || !nix_tm_is_leaf(dev, tm_node->lvl) ||
3202 !tm_node->parent || tm_node->parent->hw_id == UINT32_MAX) {
3207 /* Get SMQ ID of the leaf node's parent */
3207 *smq = tm_node->parent->hw_id;
3208 *rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
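/* Clear software XOFF on the parent SMQ so the queue can start
 * transmitting once the SQ itself is enabled.
 */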
3210 rc = nix_smq_xoff(dev, tm_node->parent, false);
3213 tm_node->flags |= NIX_TM_NODE_ENABLED;