1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #include <rte_malloc.h>
7 #include "otx2_ethdev.h"
10 /* The last NIX_TXSCH_LVL_CNT node ids are reserved for the default nodes */
11 #define NIX_DEFAULT_NODE_ID_START (RTE_TM_NODE_ID_NULL - NIX_TXSCH_LVL_CNT)
13 enum otx2_tm_node_level {
24 uint64_t shaper2regval(struct shaper_params *shaper)
26 return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
27 (shaper->div_exp << 13) | (shaper->exponent << 9) |
28 (shaper->mantissa << 1);
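/*
 * Editorial note (not from the original source): shaper2regval() packs only
 * the rate and burst fields and leaves bit 0 clear; callers such as
 * prepare_tm_shaper_reg() below OR the result with 1, which suggests bit 0
 * is the shaper enable bit of the *_PIR/*_CIR registers.
 */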
32 otx2_nix_get_link(struct otx2_eth_dev *dev)
34 int link = 13 /* SDP */;
38 lmac_chan = dev->tx_chan_base;
41 if (lmac_chan >= 0x800) {
42 map = lmac_chan & 0x7FF;
43 link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
44 } else if (lmac_chan < 0x700) {
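/*
 * Illustrative example with assumed values (editorial): for
 * tx_chan_base = 0x920, map = 0x920 & 0x7FF = 0x120, so
 * cgx_id = (map >> 8) & 0xF = 1 and lmac_id = (map >> 4) & 0xF = 2,
 * giving link = 4 * 1 + 2 = 6.  The branch for channels below 0x700 is
 * elided here; the initial value of 13 covers the SDP case noted above.
 */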
53 nix_get_relchan(struct otx2_eth_dev *dev)
55 return dev->tx_chan_base & 0xff;
59 nix_tm_have_tl1_access(struct otx2_eth_dev *dev)
61 bool is_lbk = otx2_dev_is_lbk(dev);
62 return otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) && !is_lbk;
66 nix_tm_is_leaf(struct otx2_eth_dev *dev, int lvl)
68 if (nix_tm_have_tl1_access(dev))
69 return (lvl == OTX2_TM_LVL_QUEUE);
71 return (lvl == OTX2_TM_LVL_SCH4);
75 find_prio_anchor(struct otx2_eth_dev *dev, uint32_t node_id)
77 struct otx2_nix_tm_node *child_node;
79 TAILQ_FOREACH(child_node, &dev->node_list, node) {
80 if (!child_node->parent)
82 if (child_node->parent->id != node_id)
84 if (child_node->priority == child_node->parent->rr_prio)
86 return child_node->hw_id - child_node->priority;
92 static struct otx2_nix_tm_shaper_profile *
93 nix_tm_shaper_profile_search(struct otx2_eth_dev *dev, uint32_t shaper_id)
95 struct otx2_nix_tm_shaper_profile *tm_shaper_profile;
97 TAILQ_FOREACH(tm_shaper_profile, &dev->shaper_profile_list, shaper) {
98 if (tm_shaper_profile->shaper_profile_id == shaper_id)
99 return tm_shaper_profile;
104 static inline uint64_t
105 shaper_rate_to_nix(uint64_t value, uint64_t *exponent_p,
106 uint64_t *mantissa_p, uint64_t *div_exp_p)
108 uint64_t div_exp, exponent, mantissa;
110 /* Boundary checks */
111 if (value < MIN_SHAPER_RATE ||
112 value > MAX_SHAPER_RATE)
115 if (value <= SHAPER_RATE(0, 0, 0)) {
116 /* Calculate rate div_exp and mantissa using
117 * the following formula:
119 * value = (2E6 * (256 + mantissa)
120 * / ((1 << div_exp) * 256))
124 mantissa = MAX_RATE_MANTISSA;
126 while (value < (NIX_SHAPER_RATE_CONST / (1 << div_exp)))
130 ((NIX_SHAPER_RATE_CONST * (256 + mantissa)) /
131 ((1 << div_exp) * 256)))
134 /* Calculate rate exponent and mantissa using
135 * the following formula:
137 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
141 exponent = MAX_RATE_EXPONENT;
142 mantissa = MAX_RATE_MANTISSA;
144 while (value < (NIX_SHAPER_RATE_CONST * (1 << exponent)))
147 while (value < ((NIX_SHAPER_RATE_CONST *
148 ((256 + mantissa) << exponent)) / 256))
152 if (div_exp > MAX_RATE_DIV_EXP ||
153 exponent > MAX_RATE_EXPONENT || mantissa > MAX_RATE_MANTISSA)
157 *div_exp_p = div_exp;
159 *exponent_p = exponent;
161 *mantissa_p = mantissa;
163 /* Calculate real rate value */
164 return SHAPER_RATE(exponent, mantissa, div_exp);
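/*
 * Worked example (editorial, assuming NIX_SHAPER_RATE_CONST == 2E6 as the
 * formula comments above state): for value = 10,000,000 the else branch is
 * taken since value > SHAPER_RATE(0, 0, 0) == 2,000,000.  The exponent loop
 * stops at exponent = 2 (2E6 * 4 = 8E6 <= 10E6) and the mantissa loop at
 * mantissa = 64 (2E6 * (256 + 64) * 4 / 256 == 10E6), so the requested rate
 * is exactly representable and SHAPER_RATE(2, 64, 0) == 10,000,000.
 */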
167 static inline uint64_t
168 shaper_burst_to_nix(uint64_t value, uint64_t *exponent_p,
169 uint64_t *mantissa_p)
171 uint64_t exponent, mantissa;
173 if (value < MIN_SHAPER_BURST || value > MAX_SHAPER_BURST)
176 /* Calculate burst exponent and mantissa using
177 * the following formula:
179 * value = (((256 + mantissa) << (exponent + 1)
183 exponent = MAX_BURST_EXPONENT;
184 mantissa = MAX_BURST_MANTISSA;
186 while (value < (1ull << (exponent + 1)))
189 while (value < ((256 + mantissa) << (exponent + 1)) / 256)
192 if (exponent > MAX_BURST_EXPONENT || mantissa > MAX_BURST_MANTISSA)
196 *exponent_p = exponent;
198 *mantissa_p = mantissa;
200 return SHAPER_BURST(exponent, mantissa);
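/*
 * Worked example (editorial, assuming MAX_BURST_EXPONENT >= 11 and that
 * 5000 lies within the MIN/MAX_SHAPER_BURST bounds): for value = 5000 the
 * first loop stops at exponent = 11 (1 << 12 = 4096 <= 5000) and the second
 * at mantissa = 56 (((256 + 56) << 12) / 256 = 4992 <= 5000), so the burst
 * is rounded down to SHAPER_BURST(11, 56) == 4992.
 */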
204 shaper_config_to_nix(struct otx2_nix_tm_shaper_profile *profile,
205 struct shaper_params *cir,
206 struct shaper_params *pir)
208 struct rte_tm_shaper_params *param = &profile->params;
213 /* Calculate CIR exponent and mantissa */
214 if (param->committed.rate)
215 cir->rate = shaper_rate_to_nix(param->committed.rate,
220 /* Calculate PIR exponent and mantissa */
221 if (param->peak.rate)
222 pir->rate = shaper_rate_to_nix(param->peak.rate,
227 /* Calculate CIR burst exponent and mantissa */
228 if (param->committed.size)
229 cir->burst = shaper_burst_to_nix(param->committed.size,
230 &cir->burst_exponent,
231 &cir->burst_mantissa);
233 /* Calculate PIR burst exponent and mantissa */
234 if (param->peak.size)
235 pir->burst = shaper_burst_to_nix(param->peak.size,
236 &pir->burst_exponent,
237 &pir->burst_mantissa);
241 shaper_default_red_algo(struct otx2_eth_dev *dev,
242 struct otx2_nix_tm_node *tm_node,
243 struct otx2_nix_tm_shaper_profile *profile)
245 struct shaper_params cir, pir;
247 /* C0 doesn't support STALL when both PIR & CIR are enabled */
248 if (profile && otx2_dev_is_96xx_Cx(dev)) {
249 memset(&cir, 0, sizeof(cir));
250 memset(&pir, 0, sizeof(pir));
251 shaper_config_to_nix(profile, &cir, &pir);
253 if (pir.rate && cir.rate) {
254 tm_node->red_algo = NIX_REDALG_DISCARD;
255 tm_node->flags |= NIX_TM_NODE_RED_DISCARD;
260 tm_node->red_algo = NIX_REDALG_STD;
261 tm_node->flags &= ~NIX_TM_NODE_RED_DISCARD;
265 populate_tm_tl1_default(struct otx2_eth_dev *dev, uint32_t schq)
267 struct otx2_mbox *mbox = dev->mbox;
268 struct nix_txschq_config *req;
271 * Default config for TL1.
272 * For VF this is always ignored.
275 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
276 req->lvl = NIX_TXSCH_LVL_TL1;
278 /* Set DWRR quantum */
279 req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
280 req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
283 req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
284 req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
287 req->reg[2] = NIX_AF_TL1X_CIR(schq);
291 return otx2_mbox_process(mbox);
295 prepare_tm_sched_reg(struct otx2_eth_dev *dev,
296 struct otx2_nix_tm_node *tm_node,
297 volatile uint64_t *reg, volatile uint64_t *regval)
299 uint64_t strict_prio = tm_node->priority;
300 uint32_t hw_lvl = tm_node->hw_lvl;
301 uint32_t schq = tm_node->hw_id;
305 rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
307 /* For children of the root, strict priority is the default if either
308 * the device root is TL2 or TL1 static priority is disabled.
310 if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
311 (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
312 dev->tm_flags & NIX_TM_TL1_NO_SP))
313 strict_prio = TXSCH_TL1_DFLT_RR_PRIO;
315 otx2_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
316 "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
317 nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
318 tm_node->id, strict_prio, rr_quantum, tm_node);
321 case NIX_TXSCH_LVL_SMQ:
322 reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
323 regval[k] = (strict_prio << 24) | rr_quantum;
327 case NIX_TXSCH_LVL_TL4:
328 reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
329 regval[k] = (strict_prio << 24) | rr_quantum;
333 case NIX_TXSCH_LVL_TL3:
334 reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
335 regval[k] = (strict_prio << 24) | rr_quantum;
339 case NIX_TXSCH_LVL_TL2:
340 reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
341 regval[k] = (strict_prio << 24) | rr_quantum;
345 case NIX_TXSCH_LVL_TL1:
346 reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
347 regval[k] = rr_quantum;
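/*
 * Editorial summary of the cases above: from SMQ/MDQ up to TL2 the SCHEDULE
 * value ORs the DWRR rr_quantum into the low bits with the strict priority
 * shifted to bit 24; TL1 has no strict-priority field here, so only the
 * rr_quantum is programmed at that level.
 */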
357 prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
358 struct otx2_nix_tm_shaper_profile *profile,
359 volatile uint64_t *reg, volatile uint64_t *regval)
361 struct shaper_params cir, pir;
362 uint32_t schq = tm_node->hw_id;
366 memset(&cir, 0, sizeof(cir));
367 memset(&pir, 0, sizeof(pir));
368 shaper_config_to_nix(profile, &cir, &pir);
370 /* Packet length adjust */
371 if (tm_node->pkt_mode)
374 adjust = profile->params.pkt_length_adjust & 0x1FF;
376 otx2_tm_dbg("Shaper config node %s(%u) lvl %u id %u, pir %" PRIu64
377 "(%" PRIu64 "B), cir %" PRIu64 "(%" PRIu64 "B)"
378 "adjust 0x%" PRIx64 "(pktmode %u) (%p)",
379 nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
380 tm_node->id, pir.rate, pir.burst, cir.rate, cir.burst,
381 adjust, tm_node->pkt_mode, tm_node);
383 switch (tm_node->hw_lvl) {
384 case NIX_TXSCH_LVL_SMQ:
385 /* Configure PIR, CIR */
386 reg[k] = NIX_AF_MDQX_PIR(schq);
387 regval[k] = (pir.rate && pir.burst) ?
388 (shaper2regval(&pir) | 1) : 0;
391 reg[k] = NIX_AF_MDQX_CIR(schq);
392 regval[k] = (cir.rate && cir.burst) ?
393 (shaper2regval(&cir) | 1) : 0;
396 /* Configure RED ALG */
397 reg[k] = NIX_AF_MDQX_SHAPE(schq);
398 regval[k] = (adjust |
399 (uint64_t)tm_node->red_algo << 9 |
400 (uint64_t)tm_node->pkt_mode << 24);
403 case NIX_TXSCH_LVL_TL4:
404 /* Configure PIR, CIR */
405 reg[k] = NIX_AF_TL4X_PIR(schq);
406 regval[k] = (pir.rate && pir.burst) ?
407 (shaper2regval(&pir) | 1) : 0;
410 reg[k] = NIX_AF_TL4X_CIR(schq);
411 regval[k] = (cir.rate && cir.burst) ?
412 (shaper2regval(&cir) | 1) : 0;
415 /* Configure RED algo */
416 reg[k] = NIX_AF_TL4X_SHAPE(schq);
417 regval[k] = (adjust |
418 (uint64_t)tm_node->red_algo << 9 |
419 (uint64_t)tm_node->pkt_mode << 24);
422 case NIX_TXSCH_LVL_TL3:
423 /* Configure PIR, CIR */
424 reg[k] = NIX_AF_TL3X_PIR(schq);
425 regval[k] = (pir.rate && pir.burst) ?
426 (shaper2regval(&pir) | 1) : 0;
429 reg[k] = NIX_AF_TL3X_CIR(schq);
430 regval[k] = (cir.rate && cir.burst) ?
431 (shaper2regval(&cir) | 1) : 0;
434 /* Configure RED algo */
435 reg[k] = NIX_AF_TL3X_SHAPE(schq);
436 regval[k] = (adjust |
437 (uint64_t)tm_node->red_algo << 9 |
438 (uint64_t)tm_node->pkt_mode << 24);
442 case NIX_TXSCH_LVL_TL2:
443 /* Configure PIR, CIR */
444 reg[k] = NIX_AF_TL2X_PIR(schq);
445 regval[k] = (pir.rate && pir.burst) ?
446 (shaper2regval(&pir) | 1) : 0;
449 reg[k] = NIX_AF_TL2X_CIR(schq);
450 regval[k] = (cir.rate && cir.burst) ?
451 (shaper2regval(&cir) | 1) : 0;
454 /* Configure RED algo */
455 reg[k] = NIX_AF_TL2X_SHAPE(schq);
456 regval[k] = (adjust |
457 (uint64_t)tm_node->red_algo << 9 |
458 (uint64_t)tm_node->pkt_mode << 24);
462 case NIX_TXSCH_LVL_TL1:
464 reg[k] = NIX_AF_TL1X_CIR(schq);
465 regval[k] = (cir.rate && cir.burst) ?
466 (shaper2regval(&cir) | 1) : 0;
469 /* Configure length disable and adjust */
470 reg[k] = NIX_AF_TL1X_SHAPE(schq);
471 regval[k] = (adjust |
472 (uint64_t)tm_node->pkt_mode << 24);
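/*
 * Editorial summary of the cases above: each level enables PIR/CIR only when
 * both rate and burst are non-zero (the "| 1" sets the enable bit), and its
 * SHAPE register packs the length adjust in the low bits, red_algo at bit 9
 * and pkt_mode at bit 24.  TL1 is the exception: it programs only a CIR and
 * a SHAPE value without the red_algo field.
 */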
481 prepare_tm_sw_xoff(struct otx2_nix_tm_node *tm_node, bool enable,
482 volatile uint64_t *reg, volatile uint64_t *regval)
484 uint32_t hw_lvl = tm_node->hw_lvl;
485 uint32_t schq = tm_node->hw_id;
488 otx2_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)",
489 nix_hwlvl2str(hw_lvl), schq, tm_node->lvl,
490 tm_node->id, enable, tm_node);
495 case NIX_TXSCH_LVL_MDQ:
496 reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
499 case NIX_TXSCH_LVL_TL4:
500 reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
503 case NIX_TXSCH_LVL_TL3:
504 reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
507 case NIX_TXSCH_LVL_TL2:
508 reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
511 case NIX_TXSCH_LVL_TL1:
512 reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
523 populate_tm_reg(struct otx2_eth_dev *dev,
524 struct otx2_nix_tm_node *tm_node)
526 struct otx2_nix_tm_shaper_profile *profile;
527 uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
528 uint64_t regval[MAX_REGS_PER_MBOX_MSG];
529 uint64_t reg[MAX_REGS_PER_MBOX_MSG];
530 struct otx2_mbox *mbox = dev->mbox;
531 uint64_t parent = 0, child = 0;
532 uint32_t hw_lvl, rr_prio, schq;
533 struct nix_txschq_config *req;
537 memset(regval_mask, 0, sizeof(regval_mask));
538 profile = nix_tm_shaper_profile_search(dev,
539 tm_node->params.shaper_profile_id);
540 rr_prio = tm_node->rr_prio;
541 hw_lvl = tm_node->hw_lvl;
542 schq = tm_node->hw_id;
544 /* Root node will not have a parent node */
545 if (hw_lvl == dev->otx2_tm_root_lvl)
546 parent = tm_node->parent_hw_id;
548 parent = tm_node->parent->hw_id;
550 /* When the root level is TL2, program the TL1 parent's default config here */
551 if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
552 hw_lvl == dev->otx2_tm_root_lvl) {
553 rc = populate_tm_tl1_default(dev, parent);
558 if (hw_lvl != NIX_TXSCH_LVL_SMQ)
559 child = find_prio_anchor(dev, tm_node->id);
561 /* Override default rr_prio when TL1
562 * Static Priority is disabled
564 if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
565 dev->tm_flags & NIX_TM_TL1_NO_SP) {
566 rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
570 otx2_tm_dbg("Topology config node %s(%u)->%s(%"PRIu64") lvl %u, id %u"
571 " prio_anchor %"PRIu64" rr_prio %u (%p)",
572 nix_hwlvl2str(hw_lvl), schq, nix_hwlvl2str(hw_lvl + 1),
573 parent, tm_node->lvl, tm_node->id, child, rr_prio, tm_node);
575 /* Prepare Topology and Link config */
577 case NIX_TXSCH_LVL_SMQ:
579 /* Set xoff which will be cleared later and minimum length
580 * which will be used for zero padding if packet length is
583 reg[k] = NIX_AF_SMQX_CFG(schq);
584 regval[k] = BIT_ULL(50) | ((uint64_t)NIX_MAX_VTAG_INS << 36) |
586 regval_mask[k] = ~(BIT_ULL(50) | (0x7ULL << 36) | 0x7f);
589 /* Parent and schedule conf */
590 reg[k] = NIX_AF_MDQX_PARENT(schq);
591 regval[k] = parent << 16;
595 case NIX_TXSCH_LVL_TL4:
596 /* Parent and schedule conf */
597 reg[k] = NIX_AF_TL4X_PARENT(schq);
598 regval[k] = parent << 16;
601 reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
602 regval[k] = (child << 32) | (rr_prio << 1);
605 /* Configure TL4 to send to SDP channel instead of CGX/LBK */
606 if (otx2_dev_is_sdp(dev)) {
607 reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
608 regval[k] = BIT_ULL(12);
612 case NIX_TXSCH_LVL_TL3:
613 /* Parent and schedule conf */
614 reg[k] = NIX_AF_TL3X_PARENT(schq);
615 regval[k] = parent << 16;
618 reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
619 regval[k] = (child << 32) | (rr_prio << 1);
622 /* Link configuration */
623 if (!otx2_dev_is_sdp(dev) &&
624 dev->link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
625 reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
626 otx2_nix_get_link(dev));
627 regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
632 case NIX_TXSCH_LVL_TL2:
633 /* Parent and schedule conf */
634 reg[k] = NIX_AF_TL2X_PARENT(schq);
635 regval[k] = parent << 16;
638 reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
639 regval[k] = (child << 32) | (rr_prio << 1);
642 /* Link configuration */
643 if (!otx2_dev_is_sdp(dev) &&
644 dev->link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
645 reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
646 otx2_nix_get_link(dev));
647 regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
652 case NIX_TXSCH_LVL_TL1:
653 reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
654 regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
660 /* Prepare schedule config */
661 k += prepare_tm_sched_reg(dev, tm_node, &reg[k], &regval[k]);
663 /* Prepare shaping config */
664 k += prepare_tm_shaper_reg(tm_node, profile, &reg[k], &regval[k]);
669 /* Copy and send config mbox */
670 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
674 otx2_mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
675 otx2_mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
676 otx2_mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);
678 rc = otx2_mbox_process(mbox);
684 otx2_err("Txschq cfg request failed for node %p, rc=%d", tm_node, rc);
690 nix_tm_txsch_reg_config(struct otx2_eth_dev *dev)
692 struct otx2_nix_tm_node *tm_node;
696 for (hw_lvl = 0; hw_lvl <= dev->otx2_tm_root_lvl; hw_lvl++) {
697 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
698 if (tm_node->hw_lvl == hw_lvl &&
699 tm_node->hw_lvl != NIX_TXSCH_LVL_CNT) {
700 rc = populate_tm_reg(dev, tm_node);
710 static struct otx2_nix_tm_node *
711 nix_tm_node_search(struct otx2_eth_dev *dev,
712 uint32_t node_id, bool user)
714 struct otx2_nix_tm_node *tm_node;
716 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
717 if (tm_node->id == node_id &&
718 (user == !!(tm_node->flags & NIX_TM_NODE_USER)))
725 check_rr(struct otx2_eth_dev *dev, uint32_t priority, uint32_t parent_id)
727 struct otx2_nix_tm_node *tm_node;
730 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
731 if (!tm_node->parent)
734 if (tm_node->parent->id != parent_id)
737 if (tm_node->priority == priority)
744 nix_tm_update_parent_info(struct otx2_eth_dev *dev)
746 struct otx2_nix_tm_node *tm_node_child;
747 struct otx2_nix_tm_node *tm_node;
748 struct otx2_nix_tm_node *parent;
752 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
753 if (!tm_node->parent)
755 /* Count the group of children that share a priority, i.e. the RR group */
756 parent = tm_node->parent;
757 priority = tm_node->priority;
758 rr_num = check_rr(dev, priority, parent->id);
760 /* Assume that multiple RR groups are
761 * not configured, as per the advertised capability.
764 parent->rr_prio = priority;
765 parent->rr_num = rr_num;
768 /* Find out static priority children that are not in RR */
769 TAILQ_FOREACH(tm_node_child, &dev->node_list, node) {
770 if (!tm_node_child->parent)
772 if (parent->id != tm_node_child->parent->id)
774 if (parent->max_prio == UINT32_MAX &&
775 tm_node_child->priority != parent->rr_prio)
776 parent->max_prio = 0;
778 if (parent->max_prio < tm_node_child->priority &&
779 parent->rr_prio != tm_node_child->priority)
780 parent->max_prio = tm_node_child->priority;
788 nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,
789 uint32_t parent_node_id, uint32_t priority,
790 uint32_t weight, uint16_t hw_lvl,
791 uint16_t lvl, bool user,
792 struct rte_tm_node_params *params)
794 struct otx2_nix_tm_shaper_profile *profile;
795 struct otx2_nix_tm_node *tm_node, *parent_node;
798 profile_id = params->shaper_profile_id;
799 profile = nix_tm_shaper_profile_search(dev, profile_id);
801 parent_node = nix_tm_node_search(dev, parent_node_id, user);
803 tm_node = rte_zmalloc("otx2_nix_tm_node",
804 sizeof(struct otx2_nix_tm_node), 0);
809 tm_node->hw_lvl = hw_lvl;
811 /* Maintain minimum weight */
815 tm_node->id = node_id;
816 tm_node->priority = priority;
817 tm_node->weight = weight;
818 tm_node->rr_prio = 0xf;
819 tm_node->max_prio = UINT32_MAX;
820 tm_node->hw_id = UINT32_MAX;
823 tm_node->flags = NIX_TM_NODE_USER;
826 if (!nix_tm_is_leaf(dev, lvl) &&
827 ((profile && profile->params.packet_mode) ||
828 (params->nonleaf.wfq_weight_mode &&
829 params->nonleaf.n_sp_priorities &&
830 !params->nonleaf.wfq_weight_mode[0])))
831 tm_node->pkt_mode = 1;
833 rte_memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
836 profile->reference_count++;
838 tm_node->parent = parent_node;
839 tm_node->parent_hw_id = UINT32_MAX;
840 shaper_default_red_algo(dev, tm_node, profile);
842 TAILQ_INSERT_TAIL(&dev->node_list, tm_node, node);
848 nix_tm_clear_shaper_profiles(struct otx2_eth_dev *dev)
850 struct otx2_nix_tm_shaper_profile *shaper_profile;
852 while ((shaper_profile = TAILQ_FIRST(&dev->shaper_profile_list))) {
853 if (shaper_profile->reference_count)
854 otx2_tm_dbg("Shaper profile %u has non zero references",
855 shaper_profile->shaper_profile_id);
856 TAILQ_REMOVE(&dev->shaper_profile_list, shaper_profile, shaper);
857 rte_free(shaper_profile);
864 nix_clear_path_xoff(struct otx2_eth_dev *dev,
865 struct otx2_nix_tm_node *tm_node)
867 struct nix_txschq_config *req;
868 struct otx2_nix_tm_node *p;
871 /* Manipulating SW_XOFF not supported on Ax */
872 if (otx2_dev_is_Ax(dev))
875 /* Enable nodes in path for flush to succeed */
876 if (!nix_tm_is_leaf(dev, tm_node->lvl))
881 if (!(p->flags & NIX_TM_NODE_ENABLED) &&
882 (p->flags & NIX_TM_NODE_HWRES)) {
883 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
884 req->lvl = p->hw_lvl;
885 req->num_regs = prepare_tm_sw_xoff(p, false, req->reg,
887 rc = otx2_mbox_process(dev->mbox);
891 p->flags |= NIX_TM_NODE_ENABLED;
900 nix_smq_xoff(struct otx2_eth_dev *dev,
901 struct otx2_nix_tm_node *tm_node,
904 struct otx2_mbox *mbox = dev->mbox;
905 struct nix_txschq_config *req;
909 smq = tm_node->hw_id;
910 otx2_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
911 enable ? "enable" : "disable");
913 rc = nix_clear_path_xoff(dev, tm_node);
917 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
918 req->lvl = NIX_TXSCH_LVL_SMQ;
921 req->reg[0] = NIX_AF_SMQX_CFG(smq);
922 req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
923 req->regval_mask[0] = enable ?
924 ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);
926 return otx2_mbox_process(mbox);
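/*
 * Editorial note: the meaning of bits 49/50 is not spelled out in this file;
 * populate_tm_reg() sets only BIT(50) when it parks a fresh SMQ in xoff,
 * while this helper additionally sets BIT(49) on enable, which suggests
 * bit 50 is the SMQ xoff control and bit 49 triggers the flush.
 */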
930 otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable)
932 struct otx2_eth_txq *txq = __txq;
933 struct npa_aq_enq_req *req;
934 struct npa_aq_enq_rsp *rsp;
935 struct otx2_npa_lf *lf;
936 struct otx2_mbox *mbox;
937 uint64_t aura_handle;
940 otx2_tm_dbg("Setting SQ %u SQB aura FC to %s", txq->sq,
941 enable ? "enable" : "disable");
943 lf = otx2_npa_lf_obj_get();
947 /* Set/clear sqb aura fc_ena */
948 aura_handle = txq->sqb_pool->pool_id;
949 req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
951 req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
952 req->ctype = NPA_AQ_CTYPE_AURA;
953 req->op = NPA_AQ_INSTOP_WRITE;
954 /* Below is not needed for aura writes but AF driver needs it */
955 /* AF will translate to associated poolctx */
956 req->aura.pool_addr = req->aura_id;
958 req->aura.fc_ena = enable;
959 req->aura_mask.fc_ena = 1;
961 rc = otx2_mbox_process(mbox);
965 /* Read back npa aura ctx */
966 req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
968 req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
969 req->ctype = NPA_AQ_CTYPE_AURA;
970 req->op = NPA_AQ_INSTOP_READ;
972 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
976 /* Init when enabled as there might be no triggers */
978 *(volatile uint64_t *)txq->fc_mem = rsp->aura.count;
980 *(volatile uint64_t *)txq->fc_mem = txq->nb_sqb_bufs;
981 /* Sync write barrier */
988 nix_txq_flush_sq_spin(struct otx2_eth_txq *txq)
990 uint16_t sqb_cnt, head_off, tail_off;
991 struct otx2_eth_dev *dev = txq->dev;
992 uint64_t wdata, val, prev;
993 uint16_t sq = txq->sq;
995 uint64_t timeout;/* 10's of usec */
997 /* Wait for enough time based on shaper min rate */
998 timeout = (txq->qconf.nb_desc * NIX_MAX_HW_FRS * 8 * 1E5);
999 timeout = timeout / dev->tm_rate_min;
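/*
 * Illustrative calculation with assumed values (editorial): for
 * nb_desc = 1024, NIX_MAX_HW_FRS = 9212 and tm_rate_min = 1E9 (the 1 Gbps
 * default), timeout = 1024 * 9212 * 8 * 1E5 / 1E9 ~= 7546 units of 10 us,
 * i.e. roughly 75 ms of polling before the loop gives up.
 */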
1003 wdata = ((uint64_t)sq << 32);
1004 regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS);
1005 val = otx2_atomic64_add_nosync(wdata, regaddr);
1007 /* Spin multiple iterations as "txq->fc_cache_pkts" can still
1008 * have space to send pkts even though fc_mem is disabled
1014 val = otx2_atomic64_add_nosync(wdata, regaddr);
1015 /* Continue on error */
1016 if (val & BIT_ULL(63))
1022 sqb_cnt = val & 0xFFFF;
1023 head_off = (val >> 20) & 0x3F;
1024 tail_off = (val >> 28) & 0x3F;
1026 /* SQ reached quiescent state */
1027 if (sqb_cnt <= 1 && head_off == tail_off &&
1028 (*txq->fc_mem == txq->nb_sqb_bufs)) {
1040 otx2_nix_tm_dump(dev);
1044 /* Flush and disable tx queue and its parent SMQ */
1045 int otx2_nix_sq_flush_pre(void *_txq, bool dev_started)
1047 struct otx2_nix_tm_node *tm_node, *sibling;
1048 struct otx2_eth_txq *txq;
1049 struct otx2_eth_dev *dev;
1058 user = !!(dev->tm_flags & NIX_TM_COMMITTED);
1060 /* Find the node for this SQ */
1061 tm_node = nix_tm_node_search(dev, sq, user);
1062 if (!tm_node || !(tm_node->flags & NIX_TM_NODE_ENABLED)) {
1063 otx2_err("Invalid node/state for sq %u", sq);
1067 /* Enable CGX RXTX to drain pkts */
1069 /* Though it enables both RX MCAM entries and the CGX link,
1070 * we assume all the rx queues were already stopped earlier.
1072 otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
1073 rc = otx2_mbox_process(dev->mbox);
1075 otx2_err("cgx start failed, rc=%d", rc);
1080 /* Disable smq xoff for case it was enabled earlier */
1081 rc = nix_smq_xoff(dev, tm_node->parent, false);
1083 otx2_err("Failed to enable smq %u, rc=%d",
1084 tm_node->parent->hw_id, rc);
1088 /* As per the HRM, to disable an SQ, all other SQs
1089 * that feed the same SMQ must be paused before the SMQ flush.
1091 TAILQ_FOREACH(sibling, &dev->node_list, node) {
1092 if (sibling->parent != tm_node->parent)
1094 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
1098 txq = dev->eth_dev->data->tx_queues[sq];
1102 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
1104 otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
1108 /* Wait for sq entries to be flushed */
1109 rc = nix_txq_flush_sq_spin(txq);
1111 otx2_err("Failed to drain sq %u, rc=%d\n", txq->sq, rc);
1116 tm_node->flags &= ~NIX_TM_NODE_ENABLED;
1118 /* Disable and flush */
1119 rc = nix_smq_xoff(dev, tm_node->parent, true);
1121 otx2_err("Failed to disable smq %u, rc=%d",
1122 tm_node->parent->hw_id, rc);
1126 /* Restore cgx state */
1128 otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
1129 rc |= otx2_mbox_process(dev->mbox);
1135 int otx2_nix_sq_flush_post(void *_txq)
1137 struct otx2_nix_tm_node *tm_node, *sibling;
1138 struct otx2_eth_txq *txq = _txq;
1139 struct otx2_eth_txq *s_txq;
1140 struct otx2_eth_dev *dev;
1148 user = !!(dev->tm_flags & NIX_TM_COMMITTED);
1150 /* Find the node for this SQ */
1151 tm_node = nix_tm_node_search(dev, sq, user);
1153 otx2_err("Invalid node for sq %u", sq);
1157 /* Enable all the siblings back */
1158 TAILQ_FOREACH(sibling, &dev->node_list, node) {
1159 if (sibling->parent != tm_node->parent)
1162 if (sibling->id == sq)
1165 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
1169 s_txq = dev->eth_dev->data->tx_queues[s_sq];
1174 /* Enable back if any SQ is still present */
1175 rc = nix_smq_xoff(dev, tm_node->parent, false);
1177 otx2_err("Failed to enable smq %u, rc=%d",
1178 tm_node->parent->hw_id, rc);
1184 rc = otx2_nix_sq_sqb_aura_fc(s_txq, true);
1186 otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
1195 nix_sq_sched_data(struct otx2_eth_dev *dev,
1196 struct otx2_nix_tm_node *tm_node,
1197 bool rr_quantum_only)
1199 struct rte_eth_dev *eth_dev = dev->eth_dev;
1200 struct otx2_mbox *mbox = dev->mbox;
1201 uint16_t sq = tm_node->id, smq;
1202 struct nix_aq_enq_req *req;
1203 uint64_t rr_quantum;
1206 smq = tm_node->parent->hw_id;
1207 rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
1209 if (rr_quantum_only)
1210 otx2_tm_dbg("Update sq(%u) rr_quantum 0x%"PRIx64, sq, rr_quantum);
1212 otx2_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%"PRIx64,
1213 sq, smq, rr_quantum);
1215 if (sq >= eth_dev->data->nb_tx_queues)
1218 req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
1220 req->ctype = NIX_AQ_CTYPE_SQ;
1221 req->op = NIX_AQ_INSTOP_WRITE;
1223 /* smq update only when needed */
1224 if (!rr_quantum_only) {
1226 req->sq_mask.smq = ~req->sq_mask.smq;
1228 req->sq.smq_rr_quantum = rr_quantum;
1229 req->sq_mask.smq_rr_quantum = ~req->sq_mask.smq_rr_quantum;
1231 rc = otx2_mbox_process(mbox);
1233 otx2_err("Failed to set smq, rc=%d", rc);
1237 int otx2_nix_sq_enable(void *_txq)
1239 struct otx2_eth_txq *txq = _txq;
1242 /* Enable sqb_aura fc */
1243 rc = otx2_nix_sq_sqb_aura_fc(txq, true);
1245 otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
1253 nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask,
1254 uint32_t flags, bool hw_only)
1256 struct otx2_nix_tm_shaper_profile *profile;
1257 struct otx2_nix_tm_node *tm_node, *next_node;
1258 struct otx2_mbox *mbox = dev->mbox;
1259 struct nix_txsch_free_req *req;
1260 uint32_t profile_id;
1263 next_node = TAILQ_FIRST(&dev->node_list);
1265 tm_node = next_node;
1266 next_node = TAILQ_NEXT(tm_node, node);
1268 /* Check for only requested nodes */
1269 if ((tm_node->flags & flags_mask) != flags)
1272 if (!nix_tm_is_leaf(dev, tm_node->lvl) &&
1273 tm_node->hw_lvl != NIX_TXSCH_LVL_TL1 &&
1274 tm_node->flags & NIX_TM_NODE_HWRES) {
1275 /* Free specific HW resource */
1276 otx2_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
1277 nix_hwlvl2str(tm_node->hw_lvl),
1278 tm_node->hw_id, tm_node->lvl,
1279 tm_node->id, tm_node);
1281 rc = nix_clear_path_xoff(dev, tm_node);
1285 req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
1287 req->schq_lvl = tm_node->hw_lvl;
1288 req->schq = tm_node->hw_id;
1289 rc = otx2_mbox_process(mbox);
1292 tm_node->flags &= ~NIX_TM_NODE_HWRES;
1295 /* Leave software elements if needed */
1299 otx2_tm_dbg("Free node lvl %u id %u (%p)",
1300 tm_node->lvl, tm_node->id, tm_node);
1302 profile_id = tm_node->params.shaper_profile_id;
1303 profile = nix_tm_shaper_profile_search(dev, profile_id);
1305 profile->reference_count--;
1307 TAILQ_REMOVE(&dev->node_list, tm_node, node);
1312 /* Free all hw resources */
1313 req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
1314 req->flags = TXSCHQ_FREE_ALL;
1316 return otx2_mbox_process(mbox);
1323 nix_tm_copy_rsp_to_dev(struct otx2_eth_dev *dev,
1324 struct nix_txsch_alloc_rsp *rsp)
1329 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1330 for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) {
1331 dev->txschq_list[lvl][schq] = rsp->schq_list[lvl][schq];
1332 dev->txschq_contig_list[lvl][schq] =
1333 rsp->schq_contig_list[lvl][schq];
1336 dev->txschq[lvl] = rsp->schq[lvl];
1337 dev->txschq_contig[lvl] = rsp->schq_contig[lvl];
1343 nix_tm_assign_id_to_node(struct otx2_eth_dev *dev,
1344 struct otx2_nix_tm_node *child,
1345 struct otx2_nix_tm_node *parent)
1347 uint32_t hw_id, schq_con_index, prio_offset;
1348 uint32_t l_id, schq_index;
1350 otx2_tm_dbg("Assign hw id for child node %s lvl %u id %u (%p)",
1351 nix_hwlvl2str(child->hw_lvl), child->lvl, child->id, child);
1353 child->flags |= NIX_TM_NODE_HWRES;
1355 /* Process root nodes */
1356 if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
1357 child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
1359 uint32_t tschq_con_index;
1361 l_id = child->hw_lvl;
1362 tschq_con_index = dev->txschq_contig_index[l_id];
1363 hw_id = dev->txschq_contig_list[l_id][tschq_con_index];
1364 child->hw_id = hw_id;
1365 dev->txschq_contig_index[l_id]++;
1366 /* Update TL1 hw_id for its parent for config purpose */
1367 idx = dev->txschq_index[NIX_TXSCH_LVL_TL1]++;
1368 hw_id = dev->txschq_list[NIX_TXSCH_LVL_TL1][idx];
1369 child->parent_hw_id = hw_id;
1372 if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL1 &&
1373 child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
1374 uint32_t tschq_con_index;
1376 l_id = child->hw_lvl;
1377 tschq_con_index = dev->txschq_index[l_id];
1378 hw_id = dev->txschq_list[l_id][tschq_con_index];
1379 child->hw_id = hw_id;
1380 dev->txschq_index[l_id]++;
1384 /* Process children with parents */
1385 l_id = child->hw_lvl;
1386 schq_index = dev->txschq_index[l_id];
1387 schq_con_index = dev->txschq_contig_index[l_id];
1389 if (child->priority == parent->rr_prio) {
1390 hw_id = dev->txschq_list[l_id][schq_index];
1391 child->hw_id = hw_id;
1392 child->parent_hw_id = parent->hw_id;
1393 dev->txschq_index[l_id]++;
1395 prio_offset = schq_con_index + child->priority;
1396 hw_id = dev->txschq_contig_list[l_id][prio_offset];
1397 child->hw_id = hw_id;
1403 nix_tm_assign_hw_id(struct otx2_eth_dev *dev)
1405 struct otx2_nix_tm_node *parent, *child;
1406 uint32_t child_hw_lvl, con_index_inc, i;
1408 for (i = NIX_TXSCH_LVL_TL1; i > 0; i--) {
1409 TAILQ_FOREACH(parent, &dev->node_list, node) {
1410 child_hw_lvl = parent->hw_lvl - 1;
1411 if (parent->hw_lvl != i)
1413 TAILQ_FOREACH(child, &dev->node_list, node) {
1416 if (child->parent->id != parent->id)
1418 nix_tm_assign_id_to_node(dev, child, parent);
1421 con_index_inc = parent->max_prio + 1;
1422 dev->txschq_contig_index[child_hw_lvl] += con_index_inc;
1425 * Explicitly assign an hw id to the root-level parent node,
1426 * since it has no parent of its own to trigger the assignment
1428 if (parent->hw_lvl == dev->otx2_tm_root_lvl)
1429 nix_tm_assign_id_to_node(dev, parent, NULL);
1436 nix_tm_count_req_schq(struct otx2_eth_dev *dev,
1437 struct nix_txsch_alloc_req *req, uint8_t lvl)
1439 struct otx2_nix_tm_node *tm_node;
1440 uint8_t contig_count;
1442 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1443 if (lvl == tm_node->hw_lvl) {
1444 req->schq[lvl - 1] += tm_node->rr_num;
1445 if (tm_node->max_prio != UINT32_MAX) {
1446 contig_count = tm_node->max_prio + 1;
1447 req->schq_contig[lvl - 1] += contig_count;
1450 if (lvl == dev->otx2_tm_root_lvl &&
1451 dev->otx2_tm_root_lvl && lvl == NIX_TXSCH_LVL_TL2 &&
1452 tm_node->hw_lvl == dev->otx2_tm_root_lvl) {
1453 req->schq_contig[dev->otx2_tm_root_lvl]++;
1457 req->schq[NIX_TXSCH_LVL_TL1] = 1;
1458 req->schq_contig[NIX_TXSCH_LVL_TL1] = 0;
1464 nix_tm_prepare_txschq_req(struct otx2_eth_dev *dev,
1465 struct nix_txsch_alloc_req *req)
1469 for (i = NIX_TXSCH_LVL_TL1; i > 0; i--)
1470 nix_tm_count_req_schq(dev, req, i);
1472 for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
1473 dev->txschq_index[i] = 0;
1474 dev->txschq_contig_index[i] = 0;
1480 nix_tm_send_txsch_alloc_msg(struct otx2_eth_dev *dev)
1482 struct otx2_mbox *mbox = dev->mbox;
1483 struct nix_txsch_alloc_req *req;
1484 struct nix_txsch_alloc_rsp *rsp;
1487 req = otx2_mbox_alloc_msg_nix_txsch_alloc(mbox);
1489 rc = nix_tm_prepare_txschq_req(dev, req);
1493 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1497 nix_tm_copy_rsp_to_dev(dev, rsp);
1498 dev->link_cfg_lvl = rsp->link_cfg_lvl;
1500 nix_tm_assign_hw_id(dev);
1505 nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)
1507 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1508 struct otx2_nix_tm_node *tm_node;
1509 struct otx2_eth_txq *txq;
1513 nix_tm_update_parent_info(dev);
1515 rc = nix_tm_send_txsch_alloc_msg(dev);
1517 otx2_err("TM failed to alloc tm resources=%d", rc);
1521 rc = nix_tm_txsch_reg_config(dev);
1523 otx2_err("TM failed to configure sched registers=%d", rc);
1527 /* Trigger MTU recalculation as the SMQ config needs the MTU */
1528 if (eth_dev->data->dev_started && eth_dev->data->nb_rx_queues) {
1529 rc = otx2_nix_recalc_mtu(eth_dev);
1531 otx2_err("TM MTU update failed, rc=%d", rc);
1537 /* Mark all non-leaf nodes as enabled */
1537 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1538 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1539 tm_node->flags |= NIX_TM_NODE_ENABLED;
1545 /* Update SQ Sched Data while SQ is idle */
1546 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1547 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1550 rc = nix_sq_sched_data(dev, tm_node, false);
1552 otx2_err("SQ %u sched update failed, rc=%d",
1559 /* Finally XON all SMQs */
1559 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1560 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1563 rc = nix_smq_xoff(dev, tm_node, false);
1565 otx2_err("Failed to enable smq %u, rc=%d",
1566 tm_node->hw_id, rc);
1571 /* Enable xmit as all the topology is ready */
1572 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1573 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1577 txq = eth_dev->data->tx_queues[sq];
1579 rc = otx2_nix_sq_enable(txq);
1581 otx2_err("TM sw xon failed on SQ %u, rc=%d",
1585 tm_node->flags |= NIX_TM_NODE_ENABLED;
1592 send_tm_reqval(struct otx2_mbox *mbox,
1593 struct nix_txschq_config *req,
1594 struct rte_tm_error *error)
1598 if (!req->num_regs ||
1599 req->num_regs > MAX_REGS_PER_MBOX_MSG) {
1600 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1601 error->message = "invalid config";
1605 rc = otx2_mbox_process(mbox);
1607 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1608 error->message = "unexpected fatal error";
1614 nix_tm_lvl2nix(struct otx2_eth_dev *dev, uint32_t lvl)
1616 if (nix_tm_have_tl1_access(dev)) {
1618 case OTX2_TM_LVL_ROOT:
1619 return NIX_TXSCH_LVL_TL1;
1620 case OTX2_TM_LVL_SCH1:
1621 return NIX_TXSCH_LVL_TL2;
1622 case OTX2_TM_LVL_SCH2:
1623 return NIX_TXSCH_LVL_TL3;
1624 case OTX2_TM_LVL_SCH3:
1625 return NIX_TXSCH_LVL_TL4;
1626 case OTX2_TM_LVL_SCH4:
1627 return NIX_TXSCH_LVL_SMQ;
1629 return NIX_TXSCH_LVL_CNT;
1633 case OTX2_TM_LVL_ROOT:
1634 return NIX_TXSCH_LVL_TL2;
1635 case OTX2_TM_LVL_SCH1:
1636 return NIX_TXSCH_LVL_TL3;
1637 case OTX2_TM_LVL_SCH2:
1638 return NIX_TXSCH_LVL_TL4;
1639 case OTX2_TM_LVL_SCH3:
1640 return NIX_TXSCH_LVL_SMQ;
1642 return NIX_TXSCH_LVL_CNT;
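/*
 * Editorial summary of the mapping above:
 *   with TL1 access:    ROOT->TL1, SCH1->TL2, SCH2->TL3, SCH3->TL4, SCH4->SMQ
 *   without TL1 access: ROOT->TL2, SCH1->TL3, SCH2->TL4, SCH3->SMQ
 * Any other level (notably the leaf/queue level) maps to NIX_TXSCH_LVL_CNT,
 * which callers treat as "not a scheduler level".
 */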
1648 nix_max_prio(struct otx2_eth_dev *dev, uint16_t hw_lvl)
1650 if (hw_lvl >= NIX_TXSCH_LVL_CNT)
1653 /* MDQ doesn't support SP */
1654 if (hw_lvl == NIX_TXSCH_LVL_MDQ)
1657 /* PF's TL1 with VFs enabled doesn't support SP */
1658 if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
1659 (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
1660 (dev->tm_flags & NIX_TM_TL1_NO_SP)))
1663 return TXSCH_TLX_SP_PRIO_MAX - 1;
1668 validate_prio(struct otx2_eth_dev *dev, uint32_t lvl,
1669 uint32_t parent_id, uint32_t priority,
1670 struct rte_tm_error *error)
1672 uint8_t priorities[TXSCH_TLX_SP_PRIO_MAX];
1673 struct otx2_nix_tm_node *tm_node;
1674 uint32_t rr_num = 0;
1677 /* Validate priority against max */
1678 if (priority > nix_max_prio(dev, nix_tm_lvl2nix(dev, lvl - 1))) {
1679 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
1680 error->message = "unsupported priority value";
1684 if (parent_id == RTE_TM_NODE_ID_NULL)
1687 memset(priorities, 0, TXSCH_TLX_SP_PRIO_MAX);
1688 priorities[priority] = 1;
1690 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1691 if (!tm_node->parent)
1694 if (!(tm_node->flags & NIX_TM_NODE_USER))
1697 if (tm_node->parent->id != parent_id)
1700 priorities[tm_node->priority]++;
1703 for (i = 0; i < TXSCH_TLX_SP_PRIO_MAX; i++)
1704 if (priorities[i] > 1)
1707 /* At most one RR group per parent */
1709 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
1710 error->message = "multiple DWRR node priority";
1714 /* Check for previous priority to avoid holes in priorities */
1715 if (priority && !priorities[priority - 1]) {
1716 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
1717 error->message = "priority not in order";
1725 read_tm_reg(struct otx2_mbox *mbox, uint64_t reg,
1726 uint64_t *regval, uint32_t hw_lvl)
1728 volatile struct nix_txschq_config *req;
1729 struct nix_txschq_config *rsp;
1732 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
1738 rc = otx2_mbox_process_msg(mbox, (void **)&rsp);
1741 *regval = rsp->regval[0];
1745 /* Search for min rate in topology */
1747 nix_tm_shaper_profile_update_min(struct otx2_eth_dev *dev)
1749 struct otx2_nix_tm_shaper_profile *profile;
1750 uint64_t rate_min = 1E9; /* 1 Gbps */
1752 TAILQ_FOREACH(profile, &dev->shaper_profile_list, shaper) {
1753 if (profile->params.peak.rate &&
1754 profile->params.peak.rate < rate_min)
1755 rate_min = profile->params.peak.rate;
1757 if (profile->params.committed.rate &&
1758 profile->params.committed.rate < rate_min)
1759 rate_min = profile->params.committed.rate;
1762 dev->tm_rate_min = rate_min;
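/*
 * Editorial note: tm_rate_min stays at the 1E9 (1 Gbps) default when no
 * profile specifies a lower peak or committed rate; nix_txq_flush_sq_spin()
 * divides by it to size its drain timeout, so a very low shaper rate
 * lengthens the SQ flush wait accordingly.
 */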
1766 nix_xmit_disable(struct rte_eth_dev *eth_dev)
1768 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1769 uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
1770 uint16_t sqb_cnt, head_off, tail_off;
1771 struct otx2_nix_tm_node *tm_node;
1772 struct otx2_eth_txq *txq;
1773 uint64_t wdata, val;
1776 otx2_tm_dbg("Disabling xmit on %s", eth_dev->data->name);
1778 /* Enable CGX RXTX to drain pkts */
1779 if (!eth_dev->data->dev_started) {
1780 otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
1781 rc = otx2_mbox_process(dev->mbox);
1787 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1788 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1790 if (!(tm_node->flags & NIX_TM_NODE_HWRES))
1793 rc = nix_smq_xoff(dev, tm_node, false);
1795 otx2_err("Failed to enable smq %u, rc=%d",
1796 tm_node->hw_id, rc);
1801 /* Flush all tx queues */
1802 for (i = 0; i < sq_cnt; i++) {
1803 txq = eth_dev->data->tx_queues[i];
1805 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
1807 otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
1811 /* Wait for sq entries to be flushed */
1812 rc = nix_txq_flush_sq_spin(txq);
1814 otx2_err("Failed to drain sq, rc=%d\n", rc);
1819 /* XOFF & flush all SMQs. The HRM mandates that
1820 * all SQs be empty before an SMQ flush is issued.
1822 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1823 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1825 if (!(tm_node->flags & NIX_TM_NODE_HWRES))
1828 rc = nix_smq_xoff(dev, tm_node, true);
1830 otx2_err("Failed to enable smq %u, rc=%d",
1831 tm_node->hw_id, rc);
1836 /* Verify sanity of all tx queues */
1837 for (i = 0; i < sq_cnt; i++) {
1838 txq = eth_dev->data->tx_queues[i];
1840 wdata = ((uint64_t)txq->sq << 32);
1841 val = otx2_atomic64_add_nosync(wdata,
1842 (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS));
1844 sqb_cnt = val & 0xFFFF;
1845 head_off = (val >> 20) & 0x3F;
1846 tail_off = (val >> 28) & 0x3F;
1848 if (sqb_cnt > 1 || head_off != tail_off ||
1849 (*txq->fc_mem != txq->nb_sqb_bufs))
1850 otx2_err("Failed to gracefully flush sq %u", txq->sq);
1854 /* restore cgx state */
1855 if (!eth_dev->data->dev_started) {
1856 otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
1857 rc |= otx2_mbox_process(dev->mbox);
1864 otx2_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
1865 int *is_leaf, struct rte_tm_error *error)
1867 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1868 struct otx2_nix_tm_node *tm_node;
1870 if (is_leaf == NULL) {
1871 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1875 tm_node = nix_tm_node_search(dev, node_id, true);
1876 if (node_id == RTE_TM_NODE_ID_NULL || !tm_node) {
1877 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
1880 if (nix_tm_is_leaf(dev, tm_node->lvl))
1888 otx2_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
1889 struct rte_tm_capabilities *cap,
1890 struct rte_tm_error *error)
1892 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1893 struct otx2_mbox *mbox = dev->mbox;
1894 int rc, max_nr_nodes = 0, i;
1895 struct free_rsrcs_rsp *rsp;
1897 memset(cap, 0, sizeof(*cap));
1899 otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
1900 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1902 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1903 error->message = "unexpected fatal error";
1907 for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
1908 max_nr_nodes += rsp->schq[i];
1910 cap->n_nodes_max = max_nr_nodes + dev->tm_leaf_cnt;
1911 /* TL1 level is reserved for PF */
1912 cap->n_levels_max = nix_tm_have_tl1_access(dev) ?
1913 OTX2_TM_LVL_MAX : OTX2_TM_LVL_MAX - 1;
1914 cap->non_leaf_nodes_identical = 1;
1915 cap->leaf_nodes_identical = 1;
1917 /* Shaper Capabilities */
1918 cap->shaper_private_n_max = max_nr_nodes;
1919 cap->shaper_n_max = max_nr_nodes;
1920 cap->shaper_private_dual_rate_n_max = max_nr_nodes;
1921 cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1922 cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1923 cap->shaper_private_packet_mode_supported = 1;
1924 cap->shaper_private_byte_mode_supported = 1;
1925 cap->shaper_pkt_length_adjust_min = NIX_LENGTH_ADJUST_MIN;
1926 cap->shaper_pkt_length_adjust_max = NIX_LENGTH_ADJUST_MAX;
1928 /* Schedule Capabilities */
1929 cap->sched_n_children_max = rsp->schq[NIX_TXSCH_LVL_MDQ];
1930 cap->sched_sp_n_priorities_max = TXSCH_TLX_SP_PRIO_MAX;
1931 cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
1932 cap->sched_wfq_n_groups_max = 1;
1933 cap->sched_wfq_weight_max = MAX_SCHED_WEIGHT;
1934 cap->sched_wfq_packet_mode_supported = 1;
1935 cap->sched_wfq_byte_mode_supported = 1;
1937 cap->dynamic_update_mask =
1938 RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
1939 RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
1941 RTE_TM_STATS_N_PKTS |
1942 RTE_TM_STATS_N_BYTES |
1943 RTE_TM_STATS_N_PKTS_RED_DROPPED |
1944 RTE_TM_STATS_N_BYTES_RED_DROPPED;
1946 for (i = 0; i < RTE_COLORS; i++) {
1947 cap->mark_vlan_dei_supported[i] = false;
1948 cap->mark_ip_ecn_tcp_supported[i] = false;
1949 cap->mark_ip_dscp_supported[i] = false;
1956 otx2_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
1957 struct rte_tm_level_capabilities *cap,
1958 struct rte_tm_error *error)
1960 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1961 struct otx2_mbox *mbox = dev->mbox;
1962 struct free_rsrcs_rsp *rsp;
1966 memset(cap, 0, sizeof(*cap));
1968 otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
1969 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1971 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1972 error->message = "unexpected fatal error";
1976 hw_lvl = nix_tm_lvl2nix(dev, lvl);
1978 if (nix_tm_is_leaf(dev, lvl)) {
1980 cap->n_nodes_max = dev->tm_leaf_cnt;
1981 cap->n_nodes_leaf_max = dev->tm_leaf_cnt;
1982 cap->leaf_nodes_identical = 1;
1983 cap->leaf.stats_mask =
1984 RTE_TM_STATS_N_PKTS |
1985 RTE_TM_STATS_N_BYTES;
1987 } else if (lvl == OTX2_TM_LVL_ROOT) {
1988 /* Root node, aka TL2(vf)/TL1(pf) */
1989 cap->n_nodes_max = 1;
1990 cap->n_nodes_nonleaf_max = 1;
1991 cap->non_leaf_nodes_identical = 1;
1993 cap->nonleaf.shaper_private_supported = true;
1994 cap->nonleaf.shaper_private_dual_rate_supported =
1995 nix_tm_have_tl1_access(dev) ? false : true;
1996 cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1997 cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1998 cap->nonleaf.shaper_private_packet_mode_supported = 1;
1999 cap->nonleaf.shaper_private_byte_mode_supported = 1;
2001 cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
2002 cap->nonleaf.sched_sp_n_priorities_max =
2003 nix_max_prio(dev, hw_lvl) + 1;
2004 cap->nonleaf.sched_wfq_n_groups_max = 1;
2005 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
2006 cap->nonleaf.sched_wfq_packet_mode_supported = 1;
2007 cap->nonleaf.sched_wfq_byte_mode_supported = 1;
2009 if (nix_tm_have_tl1_access(dev))
2010 cap->nonleaf.stats_mask =
2011 RTE_TM_STATS_N_PKTS_RED_DROPPED |
2012 RTE_TM_STATS_N_BYTES_RED_DROPPED;
2013 } else if ((lvl < OTX2_TM_LVL_MAX) &&
2014 (hw_lvl < NIX_TXSCH_LVL_CNT)) {
2015 /* TL2, TL3, TL4, MDQ */
2016 cap->n_nodes_max = rsp->schq[hw_lvl];
2017 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
2018 cap->non_leaf_nodes_identical = 1;
2020 cap->nonleaf.shaper_private_supported = true;
2021 cap->nonleaf.shaper_private_dual_rate_supported = true;
2022 cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
2023 cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
2024 cap->nonleaf.shaper_private_packet_mode_supported = 1;
2025 cap->nonleaf.shaper_private_byte_mode_supported = 1;
2027 /* MDQ doesn't support Strict Priority */
2028 if (hw_lvl == NIX_TXSCH_LVL_MDQ)
2029 cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
2031 cap->nonleaf.sched_n_children_max =
2032 rsp->schq[hw_lvl - 1];
2033 cap->nonleaf.sched_sp_n_priorities_max =
2034 nix_max_prio(dev, hw_lvl) + 1;
2035 cap->nonleaf.sched_wfq_n_groups_max = 1;
2036 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
2037 cap->nonleaf.sched_wfq_packet_mode_supported = 1;
2038 cap->nonleaf.sched_wfq_byte_mode_supported = 1;
2040 /* unsupported level */
2041 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2048 otx2_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
2049 struct rte_tm_node_capabilities *cap,
2050 struct rte_tm_error *error)
2052 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2053 struct otx2_mbox *mbox = dev->mbox;
2054 struct otx2_nix_tm_node *tm_node;
2055 struct free_rsrcs_rsp *rsp;
2056 int rc, hw_lvl, lvl;
2058 memset(cap, 0, sizeof(*cap));
2060 tm_node = nix_tm_node_search(dev, node_id, true);
2062 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2063 error->message = "no such node";
2067 hw_lvl = tm_node->hw_lvl;
2071 if (nix_tm_is_leaf(dev, lvl)) {
2072 cap->stats_mask = RTE_TM_STATS_N_PKTS |
2073 RTE_TM_STATS_N_BYTES;
2077 otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
2078 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
2080 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2081 error->message = "unexpected fatal error";
2085 /* Non Leaf Shaper */
2086 cap->shaper_private_supported = true;
2087 cap->shaper_private_dual_rate_supported =
2088 (hw_lvl == NIX_TXSCH_LVL_TL1) ? false : true;
2089 cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
2090 cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
2091 cap->shaper_private_packet_mode_supported = 1;
2092 cap->shaper_private_byte_mode_supported = 1;
2094 /* Non Leaf Scheduler */
2095 if (hw_lvl == NIX_TXSCH_LVL_MDQ)
2096 cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
2098 cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
2100 cap->nonleaf.sched_sp_n_priorities_max = nix_max_prio(dev, hw_lvl) + 1;
2101 cap->nonleaf.sched_wfq_n_children_per_group_max =
2102 cap->nonleaf.sched_n_children_max;
2103 cap->nonleaf.sched_wfq_n_groups_max = 1;
2104 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
2105 cap->nonleaf.sched_wfq_packet_mode_supported = 1;
2106 cap->nonleaf.sched_wfq_byte_mode_supported = 1;
2108 if (hw_lvl == NIX_TXSCH_LVL_TL1)
2109 cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
2110 RTE_TM_STATS_N_BYTES_RED_DROPPED;
2115 otx2_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev,
2116 uint32_t profile_id,
2117 struct rte_tm_shaper_params *params,
2118 struct rte_tm_error *error)
2120 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2121 struct otx2_nix_tm_shaper_profile *profile;
2123 profile = nix_tm_shaper_profile_search(dev, profile_id);
2125 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2126 error->message = "shaper profile ID already exists";
2130 /* Committed rate and burst size can be enabled/disabled */
2131 if (params->committed.size || params->committed.rate) {
2132 if (params->committed.size < MIN_SHAPER_BURST ||
2133 params->committed.size > MAX_SHAPER_BURST) {
2135 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
2137 } else if (!shaper_rate_to_nix(params->committed.rate * 8,
2138 NULL, NULL, NULL)) {
2140 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
2141 error->message = "shaper committed rate invalid";
2146 /* Peak rate and burst size can be enabled/disabled */
2147 if (params->peak.size || params->peak.rate) {
2148 if (params->peak.size < MIN_SHAPER_BURST ||
2149 params->peak.size > MAX_SHAPER_BURST) {
2151 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
2153 } else if (!shaper_rate_to_nix(params->peak.rate * 8,
2154 NULL, NULL, NULL)) {
2156 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
2157 error->message = "shaper peak rate invalid";
2162 if (params->pkt_length_adjust < NIX_LENGTH_ADJUST_MIN ||
2163 params->pkt_length_adjust > NIX_LENGTH_ADJUST_MAX) {
2164 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
2165 error->message = "length adjust invalid";
2169 profile = rte_zmalloc("otx2_nix_tm_shaper_profile",
2170 sizeof(struct otx2_nix_tm_shaper_profile), 0);
2174 profile->shaper_profile_id = profile_id;
2175 rte_memcpy(&profile->params, params,
2176 sizeof(struct rte_tm_shaper_params));
2177 TAILQ_INSERT_TAIL(&dev->shaper_profile_list, profile, shaper);
2179 otx2_tm_dbg("Added TM shaper profile %u, "
2180 " pir %" PRIu64 " , pbs %" PRIu64 ", cir %" PRIu64
2181 ", cbs %" PRIu64 " , adj %u, pkt mode %d",
2183 params->peak.rate * 8,
2185 params->committed.rate * 8,
2186 params->committed.size,
2187 params->pkt_length_adjust,
2188 params->packet_mode);
2190 /* Translate rate as bits per second */
2191 profile->params.peak.rate = profile->params.peak.rate * 8;
2192 profile->params.committed.rate = profile->params.committed.rate * 8;
2193 /* Always use PIR for single rate shaping */
2194 if (!params->peak.rate && params->committed.rate) {
2195 profile->params.peak = profile->params.committed;
2196 memset(&profile->params.committed, 0,
2197 sizeof(profile->params.committed));
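/*
 * Illustrative example (editorial): a profile created with
 * committed.rate = 125000 bytes/s and no peak rate is stored after the
 * conversion above as peak.rate = 1,000,000 bits/s with the committed
 * parameters cleared, so only the PIR ends up being programmed.
 */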
2200 /* update min rate */
2201 nix_tm_shaper_profile_update_min(dev);
2206 otx2_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
2207 uint32_t profile_id,
2208 struct rte_tm_error *error)
2210 struct otx2_nix_tm_shaper_profile *profile;
2211 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2213 profile = nix_tm_shaper_profile_search(dev, profile_id);
2216 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2217 error->message = "shaper profile ID does not exist";
2221 if (profile->reference_count) {
2222 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
2223 error->message = "shaper profile in use";
2227 otx2_tm_dbg("Removing TM shaper profile %u", profile_id);
2228 TAILQ_REMOVE(&dev->shaper_profile_list, profile, shaper);
2231 /* update min rate */
2232 nix_tm_shaper_profile_update_min(dev);
2237 otx2_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
2238 uint32_t parent_node_id, uint32_t priority,
2239 uint32_t weight, uint32_t lvl,
2240 struct rte_tm_node_params *params,
2241 struct rte_tm_error *error)
2243 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2244 struct otx2_nix_tm_shaper_profile *profile = NULL;
2245 struct otx2_nix_tm_node *parent_node;
2246 int rc, pkt_mode, clear_on_fail = 0;
2247 uint32_t exp_next_lvl, i;
2248 uint32_t profile_id;
2251 /* we don't support dynamic updates */
2252 if (dev->tm_flags & NIX_TM_COMMITTED) {
2253 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2254 error->message = "dynamic update not supported";
2258 /* Leaf nodes have to be same priority */
2259 if (nix_tm_is_leaf(dev, lvl) && priority != 0) {
2260 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2261 error->message = "queue shapers must be priority 0";
2265 parent_node = nix_tm_node_search(dev, parent_node_id, true);
2267 /* find the right level */
2268 if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
2269 if (parent_node_id == RTE_TM_NODE_ID_NULL) {
2270 lvl = OTX2_TM_LVL_ROOT;
2271 } else if (parent_node) {
2272 lvl = parent_node->lvl + 1;
2274 /* Neither a proper parent nor a proper level id given */
2275 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2276 error->message = "invalid parent node id";
2281 /* Translate rte_tm level id's to nix hw level id's */
2282 hw_lvl = nix_tm_lvl2nix(dev, lvl);
2283 if (hw_lvl == NIX_TXSCH_LVL_CNT &&
2284 !nix_tm_is_leaf(dev, lvl)) {
2285 error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
2286 error->message = "invalid level id";
2290 if (node_id < dev->tm_leaf_cnt)
2291 exp_next_lvl = NIX_TXSCH_LVL_SMQ;
2293 exp_next_lvl = hw_lvl + 1;
2295 /* Check if there is no parent node yet */
2296 if (hw_lvl != dev->otx2_tm_root_lvl &&
2297 (!parent_node || parent_node->hw_lvl != exp_next_lvl)) {
2298 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2299 error->message = "invalid parent node id";
2303 /* Check if a node already exists */
2304 if (nix_tm_node_search(dev, node_id, true)) {
2305 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2306 error->message = "node already exists";
2310 if (!nix_tm_is_leaf(dev, lvl)) {
2311 /* Check if shaper profile exists for non leaf node */
2312 profile_id = params->shaper_profile_id;
2313 profile = nix_tm_shaper_profile_search(dev, profile_id);
2314 if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE && !profile) {
2315 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2316 error->message = "invalid shaper profile";
2320 /* Minimum static priority count is 1 */
2321 if (!params->nonleaf.n_sp_priorities ||
2322 params->nonleaf.n_sp_priorities > TXSCH_TLX_SP_PRIO_MAX) {
2324 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
2325 error->message = "invalid sp priorities";
2330 /* Validate weight mode */
2331 for (i = 0; i < params->nonleaf.n_sp_priorities &&
2332 params->nonleaf.wfq_weight_mode; i++) {
2333 pkt_mode = !params->nonleaf.wfq_weight_mode[i];
2334 if (pkt_mode == !params->nonleaf.wfq_weight_mode[0])
2338 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
2339 error->message = "unsupported weight mode";
2343 if (profile && params->nonleaf.n_sp_priorities &&
2344 pkt_mode != profile->params.packet_mode) {
2345 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
2346 error->message = "shaper wfq packet mode mismatch";
2351 /* Check for a second DWRR group among siblings and for holes in priorities */
2352 if (validate_prio(dev, lvl, parent_node_id, priority, error))
2355 if (weight > MAX_SCHED_WEIGHT) {
2356 error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
2357 error->message = "max weight exceeded";
2361 rc = nix_tm_node_add_to_list(dev, node_id, parent_node_id,
2362 priority, weight, hw_lvl,
2365 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2366 /* cleanup user added nodes */
2368 nix_tm_free_resources(dev, NIX_TM_NODE_USER,
2369 NIX_TM_NODE_USER, false);
2370 error->message = "failed to add node";
2373 error->type = RTE_TM_ERROR_TYPE_NONE;
2378 otx2_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id,
2379 struct rte_tm_error *error)
2381 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2382 struct otx2_nix_tm_node *tm_node, *child_node;
2383 struct otx2_nix_tm_shaper_profile *profile;
2384 uint32_t profile_id;
2386 /* we don't support dynamic updates yet */
2387 if (dev->tm_flags & NIX_TM_COMMITTED) {
2388 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2389 error->message = "hierarchy exists";
2393 if (node_id == RTE_TM_NODE_ID_NULL) {
2394 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2395 error->message = "invalid node id";
2399 tm_node = nix_tm_node_search(dev, node_id, true);
2401 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2402 error->message = "no such node";
2406 /* Check for any existing children */
2407 TAILQ_FOREACH(child_node, &dev->node_list, node) {
2408 if (child_node->parent == tm_node) {
2409 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2410 error->message = "children exist";
2415 /* Remove shaper profile reference */
2416 profile_id = tm_node->params.shaper_profile_id;
2417 profile = nix_tm_shaper_profile_search(dev, profile_id);
2418 if (profile)
	profile->reference_count--;
2420 TAILQ_REMOVE(&dev->node_list, tm_node, node);
2426 nix_tm_node_suspend_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
2427 struct rte_tm_error *error, bool suspend)
2429 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2430 struct otx2_mbox *mbox = dev->mbox;
2431 struct otx2_nix_tm_node *tm_node;
2432 struct nix_txschq_config *req;
2436 tm_node = nix_tm_node_search(dev, node_id, true);
2438 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2439 error->message = "no such node";
2443 if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
2444 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2445 error->message = "hierarchy doesn't exist";
2449 flags = tm_node->flags;
2450 flags = suspend ? (flags & ~NIX_TM_NODE_ENABLED) :
2451 (flags | NIX_TM_NODE_ENABLED);
2453 if (tm_node->flags == flags)
2454 return 0;
2456 /* send mbox for state change */
2457 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2459 req->lvl = tm_node->hw_lvl;
2460 req->num_regs = prepare_tm_sw_xoff(tm_node, suspend,
2461 req->reg, req->regval);
2462 rc = send_tm_reqval(mbox, req, error);
2463 if (!rc)
2464 tm_node->flags = flags;
2465 return rc;
2466 }
2469 otx2_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id,
2470 struct rte_tm_error *error)
2472 return nix_tm_node_suspend_resume(eth_dev, node_id, error, true);
2473 }
2476 otx2_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
2477 struct rte_tm_error *error)
2479 return nix_tm_node_suspend_resume(eth_dev, node_id, error, false);
2480 }
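/*
 * Hierarchy commit: verify that every leaf (SQ) node has been added,
 * disable transmit, tear down the default/rate-limit tree and any HW
 * resources held for user nodes, then allocate scheduler queues for
 * the new topology and mark the hierarchy as committed.
 */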
2483 otx2_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
2484 int clear_on_fail,
2485 struct rte_tm_error *error)
2487 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2488 struct otx2_nix_tm_node *tm_node;
2489 uint32_t leaf_cnt = 0;
2492 if (dev->tm_flags & NIX_TM_COMMITTED) {
2493 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2494 error->message = "hierarchy exists";
2495 return -EINVAL;
2496 }
2498 /* Check if we have all the leaf nodes */
2499 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
2500 if (tm_node->flags & NIX_TM_NODE_USER &&
2501 tm_node->id < dev->tm_leaf_cnt)
2502 leaf_cnt++;
2503 }
2505 if (leaf_cnt != dev->tm_leaf_cnt) {
2506 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2507 error->message = "incomplete hierarchy";
2508 return -EINVAL;
2509 }
2511 /*
2512 * Disable transmit; it is re-enabled once the
2513 * new topology is in place.
2514 */
2515 rc = nix_xmit_disable(eth_dev);
2516 if (rc) {
2517 otx2_err("failed to disable TX, rc=%d", rc);
2518 return rc;
2519 }
2521 /* Delete default/ratelimit tree */
2522 if (dev->tm_flags & (NIX_TM_DEFAULT_TREE | NIX_TM_RATE_LIMIT_TREE)) {
2523 rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false);
2524 if (rc) {
2525 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2526 error->message = "failed to free default resources";
2527 return rc;
2528 }
2529 dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE |
2530 NIX_TM_RATE_LIMIT_TREE);
2531 }
2533 /* Free up user alloc'ed resources */
2534 rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER,
2535 NIX_TM_NODE_USER, true);
2536 if (rc) {
2537 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2538 error->message = "failed to free user resources";
2539 return rc;
2540 }
2542 rc = nix_tm_alloc_resources(eth_dev, true);
2543 if (rc) {
2544 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2545 error->message = "alloc resources failed";
2546 /* TODO should we restore default config ? */
2547 if (clear_on_fail)
2548 nix_tm_free_resources(dev, 0, 0, false);
2549 return rc;
2550 }
2552 error->type = RTE_TM_ERROR_TYPE_NONE;
2553 dev->tm_flags |= NIX_TM_COMMITTED;
2554 return 0;
2555 }
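/*
 * Shaper update on a committed hierarchy: the node is first flushed
 * with SW_XOFF, then the PIR/CIR registers are rewritten from the new
 * profile and SW_XOFF is cleared in a second mailbox request.
 */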
2558 otx2_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev,
2559 uint32_t node_id,
2560 uint32_t profile_id,
2561 struct rte_tm_error *error)
2563 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2564 struct otx2_nix_tm_shaper_profile *profile = NULL;
2565 struct otx2_mbox *mbox = dev->mbox;
2566 struct otx2_nix_tm_node *tm_node;
2567 struct nix_txschq_config *req;
2571 tm_node = nix_tm_node_search(dev, node_id, true);
2572 if (!tm_node || nix_tm_is_leaf(dev, tm_node->lvl)) {
2573 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2574 error->message = "invalid node";
2575 return -EINVAL;
2576 }
2578 if (profile_id == tm_node->params.shaper_profile_id)
2579 return 0;
2581 if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
2582 profile = nix_tm_shaper_profile_search(dev, profile_id);
2583 if (!profile) {
2584 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2585 error->message = "shaper profile does not exist";
2586 return -EINVAL;
2587 }
2588 }
2590 if (profile && profile->params.packet_mode != tm_node->pkt_mode) {
2591 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2592 error->message = "shaper profile pkt mode mismatch";
2593 return -EINVAL;
2594 }
2596 tm_node->params.shaper_profile_id = profile_id;
2598 /* Nothing to do if not yet committed */
2599 if (!(dev->tm_flags & NIX_TM_COMMITTED))
2600 return 0;
2602 tm_node->flags &= ~NIX_TM_NODE_ENABLED;
2604 /* Flush the specific node with SW_XOFF */
2605 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2606 req->lvl = tm_node->hw_lvl;
2607 k = prepare_tm_sw_xoff(tm_node, true, req->reg, req->regval);
2608 req->num_regs = k;
2610 rc = send_tm_reqval(mbox, req, error);
2611 if (rc)
2612 return rc;
2614 shaper_default_red_algo(dev, tm_node, profile);
2616 /* Update the PIR/CIR and clear SW XOFF */
2617 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2618 req->lvl = tm_node->hw_lvl;
2620 k = prepare_tm_shaper_reg(tm_node, profile, req->reg, req->regval);
2622 k += prepare_tm_sw_xoff(tm_node, false, &req->reg[k], &req->regval[k]);
2623 req->num_regs = k;
2625 rc = send_tm_reqval(mbox, req, error);
2626 if (!rc)
2627 tm_node->flags |= NIX_TM_NODE_ENABLED;
2628 return rc;
2629 }
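/*
 * Parent update currently only supports changing the weight of a node
 * under its existing parent: the parent and all siblings are XOFF'ed,
 * the scheduling weight is rewritten, and then everything is XON'ed
 * again in reverse order.
 */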
2632 otx2_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev,
2633 uint32_t node_id, uint32_t new_parent_id,
2634 uint32_t priority, uint32_t weight,
2635 struct rte_tm_error *error)
2637 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2638 struct otx2_nix_tm_node *tm_node, *sibling;
2639 struct otx2_nix_tm_node *new_parent;
2640 struct nix_txschq_config *req;
2644 if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
2645 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2646 error->message = "hierarchy doesn't exist";
2647 return -EINVAL;
2648 }
2650 tm_node = nix_tm_node_search(dev, node_id, true);
2651 if (!tm_node) {
2652 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2653 error->message = "no such node";
2654 return -EINVAL;
2655 }
2657 /* Parent id valid only for non root nodes */
2658 if (tm_node->hw_lvl != dev->otx2_tm_root_lvl) {
2659 new_parent = nix_tm_node_search(dev, new_parent_id, true);
2660 if (!new_parent) {
2661 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2662 error->message = "no such parent node";
2663 return -EINVAL;
2664 }
2666 /* Current support is only for dynamic weight update */
2667 if (tm_node->parent != new_parent ||
2668 tm_node->priority != priority) {
2669 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2670 error->message = "only weight update supported";
2671 return -EINVAL;
2672 }
2673 }
2675 /* Skip if no change */
2676 if (tm_node->weight == weight)
2677 return 0;
2679 tm_node->weight = weight;
2681 /* For leaf nodes, SQ CTX needs update */
2682 if (nix_tm_is_leaf(dev, tm_node->lvl)) {
2683 /* Update SQ quantum data on the fly */
2684 rc = nix_sq_sched_data(dev, tm_node, true);
2685 if (rc) {
2686 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2687 error->message = "sq sched data update failed";
2688 return rc;
2689 }
2690 } else {
2691 /* XOFF Parent node */
2692 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2693 req->lvl = tm_node->parent->hw_lvl;
2694 req->num_regs = prepare_tm_sw_xoff(tm_node->parent, true,
2695 req->reg, req->regval);
2696 rc = send_tm_reqval(dev->mbox, req, error);
2697 if (rc)
2698 return rc;
2700 /* XOFF this node and all other siblings */
2701 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2702 req->lvl = tm_node->hw_lvl;
2704 k = 0;
2705 TAILQ_FOREACH(sibling, &dev->node_list, node) {
2706 if (sibling->parent != tm_node->parent)
2707 continue;
2708 k += prepare_tm_sw_xoff(sibling, true, &req->reg[k],
2709 &req->regval[k]);
2710 }
2711 req->num_regs = k;
2712 rc = send_tm_reqval(dev->mbox, req, error);
2713 if (rc)
2714 return rc;
2716 /* Update new weight for current node */
2717 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2718 req->lvl = tm_node->hw_lvl;
2719 req->num_regs = prepare_tm_sched_reg(dev, tm_node,
2720 req->reg, req->regval);
2721 rc = send_tm_reqval(dev->mbox, req, error);
2722 if (rc)
2723 return rc;
2725 /* XON this node and all other siblings */
2726 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2727 req->lvl = tm_node->hw_lvl;
2729 k = 0;
2730 TAILQ_FOREACH(sibling, &dev->node_list, node) {
2731 if (sibling->parent != tm_node->parent)
2732 continue;
2733 k += prepare_tm_sw_xoff(sibling, false, &req->reg[k],
2734 &req->regval[k]);
2735 }
2736 req->num_regs = k;
2737 rc = send_tm_reqval(dev->mbox, req, error);
2738 if (rc)
2739 return rc;
2741 /* XON Parent node */
2742 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2743 req->lvl = tm_node->parent->hw_lvl;
2744 req->num_regs = prepare_tm_sw_xoff(tm_node->parent, false,
2745 req->reg, req->regval);
2746 rc = send_tm_reqval(dev->mbox, req, error);
2747 if (rc)
2748 return rc;
2749 }
2751 return 0;
2752 }
2754 otx2_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
2755 struct rte_tm_node_stats *stats,
2756 uint64_t *stats_mask, int clear,
2757 struct rte_tm_error *error)
2759 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2760 struct otx2_nix_tm_node *tm_node;
2765 tm_node = nix_tm_node_search(dev, node_id, true);
2766 if (!tm_node) {
2767 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2768 error->message = "no such node";
2769 return -EINVAL;
2770 }
2772 if (!(tm_node->flags & NIX_TM_NODE_HWRES)) {
2773 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2774 error->message = "HW resources not allocated";
2775 return -EINVAL;
2776 }
2778 /* Stats support only for leaf node or TL1 root */
2779 if (nix_tm_is_leaf(dev, tm_node->lvl)) {
2780 reg = (((uint64_t)tm_node->id) << 32);
2783 addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);
2784 val = otx2_atomic64_add_nosync(reg, addr);
2787 stats->n_pkts = val - tm_node->last_pkts;
2790 addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_OCTS);
2791 val = otx2_atomic64_add_nosync(reg, addr);
2794 stats->n_bytes = val - tm_node->last_bytes;
2796 if (clear) {
2797 tm_node->last_pkts = stats->n_pkts;
2798 tm_node->last_bytes = stats->n_bytes;
2799 }
2801 *stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
2803 } else if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1) {
2804 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2805 error->message = "stats read error";
2807 /* RED Drop packets */
2808 reg = NIX_AF_TL1X_DROPPED_PACKETS(tm_node->hw_id);
2809 rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
2810 if (rc)
2811 goto exit;
2812 stats->leaf.n_pkts_dropped[RTE_COLOR_RED] =
2813 val - tm_node->last_pkts;
2815 /* RED Drop bytes */
2816 reg = NIX_AF_TL1X_DROPPED_BYTES(tm_node->hw_id);
2817 rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
2818 if (rc)
2819 goto exit;
2820 stats->leaf.n_bytes_dropped[RTE_COLOR_RED] =
2821 val - tm_node->last_bytes;
2824 if (clear) {
2825 tm_node->last_pkts =
2826 stats->leaf.n_pkts_dropped[RTE_COLOR_RED];
2827 tm_node->last_bytes =
2828 stats->leaf.n_bytes_dropped[RTE_COLOR_RED];
2829 }
2831 *stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
2832 RTE_TM_STATS_N_BYTES_RED_DROPPED;
2834 } else {
2835 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2836 error->message = "unsupported node";
2837 rc = -EINVAL;
2838 }
2840 exit:
2841 return rc;
2842 }
2844 const struct rte_tm_ops otx2_tm_ops = {
2845 .node_type_get = otx2_nix_tm_node_type_get,
2847 .capabilities_get = otx2_nix_tm_capa_get,
2848 .level_capabilities_get = otx2_nix_tm_level_capa_get,
2849 .node_capabilities_get = otx2_nix_tm_node_capa_get,
2851 .shaper_profile_add = otx2_nix_tm_shaper_profile_add,
2852 .shaper_profile_delete = otx2_nix_tm_shaper_profile_delete,
2854 .node_add = otx2_nix_tm_node_add,
2855 .node_delete = otx2_nix_tm_node_delete,
2856 .node_suspend = otx2_nix_tm_node_suspend,
2857 .node_resume = otx2_nix_tm_node_resume,
2858 .hierarchy_commit = otx2_nix_tm_hierarchy_commit,
2860 .node_shaper_update = otx2_nix_tm_node_shaper_update,
2861 .node_parent_update = otx2_nix_tm_node_parent_update,
2862 .node_stats_read = otx2_nix_tm_node_stats_read,
2863 };
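/*
 * Illustrative usage sketch (not part of the driver): applications are
 * expected to reach these callbacks through the generic rte_tm API
 * rather than calling them directly, e.g.:
 *
 *	struct rte_tm_error err;
 *	struct rte_tm_node_params np = {
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *		.nonleaf = { .n_sp_priorities = 1 },
 *	};
 *
 *	rte_tm_node_add(port_id, node_id, parent_id, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *	rte_tm_hierarchy_commit(port_id, true, &err);
 */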
2866 nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)
2868 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2869 uint32_t def = eth_dev->data->nb_tx_queues;
2870 struct rte_tm_node_params params;
2871 uint32_t leaf_parent, i;
2872 int rc = 0, leaf_level;
2874 /* Default params */
2875 memset(&params, 0, sizeof(params));
2876 params.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
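/*
 * Default hierarchy: one scheduling path shared by all Tx queues.
 * With TL1 access the SW levels are ROOT -> SCH1 -> SCH2 -> SCH3 ->
 * SCH4 with the SQs as QUEUE-level leaves; without TL1 access the
 * root starts one level lower and the SQs become SCH4-level leaves.
 */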
2878 if (nix_tm_have_tl1_access(dev)) {
2879 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
2880 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2881 DEFAULT_RR_WEIGHT,
2882 NIX_TXSCH_LVL_TL1,
2883 OTX2_TM_LVL_ROOT, false, &params);
2886 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2887 DEFAULT_RR_WEIGHT,
2888 NIX_TXSCH_LVL_TL2,
2889 OTX2_TM_LVL_SCH1, false, &params);
2893 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2894 DEFAULT_RR_WEIGHT,
2895 NIX_TXSCH_LVL_TL3,
2896 OTX2_TM_LVL_SCH2, false, &params);
2900 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2901 DEFAULT_RR_WEIGHT,
2902 NIX_TXSCH_LVL_TL4,
2903 OTX2_TM_LVL_SCH3, false, &params);
2907 rc = nix_tm_node_add_to_list(dev, def + 4, def + 3, 0,
2908 DEFAULT_RR_WEIGHT,
2909 NIX_TXSCH_LVL_SMQ,
2910 OTX2_TM_LVL_SCH4, false, &params);
2914 leaf_parent = def + 4;
2915 leaf_level = OTX2_TM_LVL_QUEUE;
2916 } else {
2917 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
2918 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2919 DEFAULT_RR_WEIGHT,
2920 NIX_TXSCH_LVL_TL2,
2921 OTX2_TM_LVL_ROOT, false, &params);
2925 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2926 DEFAULT_RR_WEIGHT,
2927 NIX_TXSCH_LVL_TL3,
2928 OTX2_TM_LVL_SCH1, false, &params);
2932 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2933 DEFAULT_RR_WEIGHT,
2934 NIX_TXSCH_LVL_TL4,
2935 OTX2_TM_LVL_SCH2, false, &params);
2939 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2940 DEFAULT_RR_WEIGHT,
2941 NIX_TXSCH_LVL_SMQ,
2942 OTX2_TM_LVL_SCH3, false, &params);
2946 leaf_parent = def + 3;
2947 leaf_level = OTX2_TM_LVL_SCH4;
2948 }
2950 /* Add leaf nodes */
2951 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2952 rc = nix_tm_node_add_to_list(dev, i, leaf_parent, 0,
2953 DEFAULT_RR_WEIGHT,
2954 NIX_TXSCH_LVL_CNT,
2955 leaf_level, false, &params);
2956 if (rc)
2957 break;
2958 }
2960 return rc;
2961 }
2964 void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev)
2966 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2968 TAILQ_INIT(&dev->node_list);
2969 TAILQ_INIT(&dev->shaper_profile_list);
2970 dev->tm_rate_min = 1E9; /* 1Gbps */
2971 }
2973 int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
2975 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2976 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2977 uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
2980 /* Free up all resources already held */
2981 rc = nix_tm_free_resources(dev, 0, 0, false);
2982 if (rc) {
2983 otx2_err("Failed to free up existing resources, rc=%d", rc);
2984 return rc;
2985 }
2987 /* Clear shaper profiles */
2988 nix_tm_clear_shaper_profiles(dev);
2989 dev->tm_flags = NIX_TM_DEFAULT_TREE;
2991 /* Disable TL1 static priority when VFs are enabled,
2992 * as otherwise the VF's TL2 would need to be reallocated
2993 * at runtime to match a specific PF topology.
2994 */
2995 if (pci_dev->max_vfs)
2996 dev->tm_flags |= NIX_TM_TL1_NO_SP;
2998 rc = nix_tm_prepare_default_tree(eth_dev);
2999 if (rc)
3000 return rc;
3002 rc = nix_tm_alloc_resources(eth_dev, false);
3003 if (rc)
3004 return rc;
3005 dev->tm_leaf_cnt = sq_cnt;
3007 return 0;
3008 }
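/*
 * The rate-limit tree differs from the default tree in that every Tx
 * queue gets its own SMQ/MDQ parent node, so a per-queue shaper can be
 * programmed on that parent (see otx2_nix_tm_rate_limit_mdq()).
 */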
3011 nix_tm_prepare_rate_limited_tree(struct rte_eth_dev *eth_dev)
3013 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3014 uint32_t def = eth_dev->data->nb_tx_queues;
3015 struct rte_tm_node_params params;
3016 uint32_t leaf_parent, i;
3017 int rc = 0;
3018 memset(&params, 0, sizeof(params));
3020 if (nix_tm_have_tl1_access(dev)) {
3021 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
3022 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
3023 DEFAULT_RR_WEIGHT,
3024 NIX_TXSCH_LVL_TL1,
3025 OTX2_TM_LVL_ROOT, false, &params);
3028 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
3029 DEFAULT_RR_WEIGHT,
3030 NIX_TXSCH_LVL_TL2,
3031 OTX2_TM_LVL_SCH1, false, &params);
3034 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
3035 DEFAULT_RR_WEIGHT,
3036 NIX_TXSCH_LVL_TL3,
3037 OTX2_TM_LVL_SCH2, false, &params);
3040 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
3041 DEFAULT_RR_WEIGHT,
3042 NIX_TXSCH_LVL_TL4,
3043 OTX2_TM_LVL_SCH3, false, &params);
3046 leaf_parent = def + 3;
3048 /* Add per queue SMQ nodes */
3049 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
3050 rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
3051 leaf_parent,
3052 0, DEFAULT_RR_WEIGHT,
3053 NIX_TXSCH_LVL_SMQ,
3054 OTX2_TM_LVL_SCH4, false, &params);
3055 if (rc)
3056 return rc;
3057 }
3060 /* Add leaf nodes */
3061 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
3062 rc = nix_tm_node_add_to_list(dev, i,
3063 leaf_parent + 1 + i, 0,
3064 DEFAULT_RR_WEIGHT,
3065 NIX_TXSCH_LVL_CNT,
3066 OTX2_TM_LVL_QUEUE, false, &params);
3067 if (rc)
3068 return rc;
3069 }
3071 return 0;
3072 }
3075 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
3076 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
3077 DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL2,
3078 OTX2_TM_LVL_ROOT, false, &params);
3081 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
3082 DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL3,
3083 OTX2_TM_LVL_SCH1, false, &params);
3086 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
3087 DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL4,
3088 OTX2_TM_LVL_SCH2, false, &params);
3091 leaf_parent = def + 2;
3093 /* Add per queue SMQ nodes */
3094 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
3095 rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
3096 leaf_parent,
3097 0, DEFAULT_RR_WEIGHT,
3098 NIX_TXSCH_LVL_SMQ,
3099 OTX2_TM_LVL_SCH3, false, &params);
3100 if (rc)
3101 return rc;
3102 }
3105 /* Add leaf nodes */
3106 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
3107 rc = nix_tm_node_add_to_list(dev, i, leaf_parent + 1 + i, 0,
3108 DEFAULT_RR_WEIGHT,
3109 NIX_TXSCH_LVL_CNT,
3110 OTX2_TM_LVL_SCH4, false, &params);
3111 if (rc)
3112 break;
3113 }
3115 return rc;
3116 }
3120 otx2_nix_tm_rate_limit_mdq(struct rte_eth_dev *eth_dev,
3121 struct otx2_nix_tm_node *tm_node,
3122 uint64_t tx_rate)
3124 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3125 struct otx2_nix_tm_shaper_profile profile;
3126 struct otx2_mbox *mbox = dev->mbox;
3127 volatile uint64_t *reg, *regval;
3128 struct nix_txschq_config *req;
3133 flags = tm_node->flags;
3135 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
3136 req->lvl = NIX_TXSCH_LVL_MDQ;
3137 reg = req->reg;
3138 regval = req->regval;
3140 if (tx_rate == 0) {
3141 k += prepare_tm_sw_xoff(tm_node, true, &reg[k], &regval[k]);
3142 flags &= ~NIX_TM_NODE_ENABLED;
3143 goto exit;
3144 }
3146 if (!(flags & NIX_TM_NODE_ENABLED)) {
3147 k += prepare_tm_sw_xoff(tm_node, false, &reg[k], &regval[k]);
3148 flags |= NIX_TM_NODE_ENABLED;
3149 }
3151 /* Use only PIR for rate limit */
3152 memset(&profile, 0, sizeof(profile));
3153 profile.params.peak.rate = tx_rate;
3154 /* Minimum burst of ~4us Bytes of Tx */
3155 profile.params.peak.size = RTE_MAX(NIX_MAX_HW_FRS,
3156 (4ull * tx_rate) / (1E6 * 8));
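/*
 * Example: at tx_rate = 10 Gbps, (4 * 10e9) / (1e6 * 8) = 5000 bytes,
 * i.e. roughly 4 microseconds worth of traffic, used as the burst size
 * unless the maximum HW frame size is larger.
 */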
3157 if (!dev->tm_rate_min || dev->tm_rate_min > tx_rate)
3158 dev->tm_rate_min = tx_rate;
3160 k += prepare_tm_shaper_reg(tm_node, &profile, &reg[k], &regval[k]);
3161 exit:
3162 req->num_regs = k;
3163 rc = otx2_mbox_process(mbox);
3164 if (rc)
3165 return rc;
3167 tm_node->flags = flags;
3168 return 0;
3169 }
3172 otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
3173 uint16_t queue_idx, uint16_t tx_rate_mbps)
3175 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3176 uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
3177 struct otx2_nix_tm_node *tm_node;
3180 /* Check for supported revisions */
3181 if (otx2_dev_is_95xx_Ax(dev) ||
3182 otx2_dev_is_96xx_Ax(dev))
3183 return -EINVAL;
3185 if (queue_idx >= eth_dev->data->nb_tx_queues)
3186 return -EINVAL;
3188 if (!(dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
3189 !(dev->tm_flags & NIX_TM_RATE_LIMIT_TREE))
3190 goto error;
3192 if ((dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
3193 eth_dev->data->nb_tx_queues > 1) {
3194 /* For TM topology change ethdev needs to be stopped */
3195 if (eth_dev->data->dev_started)
3196 return -EBUSY;
3198 /*
3199 * Disable transmit; it is re-enabled once the
3200 * new topology is in place.
3201 */
3202 rc = nix_xmit_disable(eth_dev);
3203 if (rc) {
3204 otx2_err("failed to disable TX, rc=%d", rc);
3205 return rc;
3206 }
3208 rc = nix_tm_free_resources(dev, 0, 0, false);
3209 if (rc) {
3210 otx2_tm_dbg("failed to free default resources, rc %d",
3211 rc);
3212 return rc;
3213 }
3215 rc = nix_tm_prepare_rate_limited_tree(eth_dev);
3216 if (rc) {
3217 otx2_tm_dbg("failed to prepare tm tree, rc=%d", rc);
3218 return rc;
3219 }
3221 rc = nix_tm_alloc_resources(eth_dev, true);
3222 if (rc) {
3223 otx2_tm_dbg("failed to allocate tm tree, rc=%d", rc);
3224 return rc;
3225 }
3227 dev->tm_flags &= ~NIX_TM_DEFAULT_TREE;
3228 dev->tm_flags |= NIX_TM_RATE_LIMIT_TREE;
3229 }
3231 tm_node = nix_tm_node_search(dev, queue_idx, false);
3233 /* check if we found a valid leaf node */
3234 if (!tm_node ||
3235 !nix_tm_is_leaf(dev, tm_node->lvl) ||
3236 !tm_node->parent ||
3237 tm_node->parent->hw_id == UINT32_MAX)
3238 return -EIO;
3240 return otx2_nix_tm_rate_limit_mdq(eth_dev, tm_node->parent, tx_rate);
3241 error:
3242 otx2_tm_dbg("Unsupported TM tree 0x%0x", dev->tm_flags);
3243 return -EINVAL;
3244 }
3247 otx2_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *arg)
3249 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3251 if (!arg)
3252 return -EINVAL;
3254 /* Check for supported revisions */
3255 if (otx2_dev_is_95xx_Ax(dev) ||
3256 otx2_dev_is_96xx_Ax(dev))
3257 return -EINVAL;
3259 *(const void **)arg = &otx2_tm_ops;
3261 return 0;
3262 }
3265 otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
3267 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3270 /* Xmit is assumed to be disabled */
3271 /* Free up resources already held */
3272 rc = nix_tm_free_resources(dev, 0, 0, false);
3273 if (rc) {
3274 otx2_err("Failed to free up existing resources, rc=%d", rc);
3275 return rc;
3276 }
3278 /* Clear shaper profiles */
3279 nix_tm_clear_shaper_profiles(dev);
3280 dev->tm_flags = 0;
3282 return 0;
3283 }
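/*
 * otx2_nix_tm_get_leaf_data(): map a send queue to its parent SMQ id
 * and RR quantum (derived from the node weight) and clear SW_XOFF on
 * that SMQ so the queue can transmit.
 */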
3286 otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
3287 uint32_t *rr_quantum, uint16_t *smq)
3289 struct otx2_nix_tm_node *tm_node;
3292 /* 0..sq_cnt-1 are leaf nodes */
3293 if (sq >= dev->tm_leaf_cnt)
3294 return -EINVAL;
3296 /* Search for internal node first */
3297 tm_node = nix_tm_node_search(dev, sq, false);
3298 if (!tm_node)
3299 tm_node = nix_tm_node_search(dev, sq, true);
3301 /* Check if we found a valid leaf node */
3302 if (!tm_node || !nix_tm_is_leaf(dev, tm_node->lvl) ||
3303 !tm_node->parent || tm_node->parent->hw_id == UINT32_MAX) {
3304 return -EIO;
3305 }
3307 /* Get SMQ Id of leaf node's parent */
3308 *smq = tm_node->parent->hw_id;
3309 *rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
3311 rc = nix_smq_xoff(dev, tm_node->parent, false);
3312 if (rc)
3313 return rc;
3314 tm_node->flags |= NIX_TM_NODE_ENABLED;
3316 return 0;
3317 }