/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_malloc.h>

#include "otx2_ethdev.h"
#include "otx2_tm.h"
/* Use the last NIX_TXSCH_LVL_CNT node ids for the default internal nodes */
#define NIX_DEFAULT_NODE_ID_START (RTE_TM_NODE_ID_NULL - NIX_TXSCH_LVL_CNT)

enum otx2_tm_node_level {
	OTX2_TM_LVL_ROOT = 0,
	OTX2_TM_LVL_SCH1,
	OTX2_TM_LVL_SCH2,
	OTX2_TM_LVL_SCH3,
	OTX2_TM_LVL_SCH4,
	OTX2_TM_LVL_QUEUE,
	OTX2_TM_LVL_MAX,
};
static uint64_t
shaper2regval(struct shaper_params *shaper)
{
	return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
		(shaper->div_exp << 13) | (shaper->exponent << 9) |
		(shaper->mantissa << 1);
}
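/* Field layout implied by the shifts above (an inference from this
 * function and the MAX_* limits in otx2_tm.h, not a quote from the
 * hardware manual):
 *   [40:37] burst_exponent   [36:29] burst_mantissa
 *   [16:13] div_exp          [12:9]  exponent
 *   [8:1]   mantissa         [0]     enable, OR'd in by callers as "| 1"
 */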
static int
nix_get_link(struct otx2_eth_dev *dev)
{
	int link = 13 /* SDP */;
	uint16_t lmac_chan;
	uint16_t map;

	lmac_chan = dev->tx_chan_base;

	/* CGX lmac link */
	if (lmac_chan >= 0x800) {
		map = lmac_chan & 0x7FF;
		link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
	} else if (lmac_chan < 0x700) {
		/* LBK channel */
		link = 12;
	}

	return link;
}
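/* Worked example, assuming the channel encoding implied above:
 * tx_chan_base = 0x810 -> map = 0x010 -> link = 4 * 0 + 1 = 1,
 * i.e. CGX0/LMAC1. Channels below 0x700 map to the LBK link (12);
 * anything else keeps the SDP default (13).
 */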
static int
nix_get_relchan(struct otx2_eth_dev *dev)
{
	return dev->tx_chan_base & 0xff;
}
static bool
nix_tm_have_tl1_access(struct otx2_eth_dev *dev)
{
	bool is_lbk = otx2_dev_is_lbk(dev);

	return otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) &&
		!is_lbk && !dev->maxvf;
}
static int
find_prio_anchor(struct otx2_eth_dev *dev, uint32_t node_id)
{
	struct otx2_nix_tm_node *child_node;

	TAILQ_FOREACH(child_node, &dev->node_list, node) {
		if (!child_node->parent)
			continue;
		if (child_node->parent->id != node_id)
			continue;
		if (child_node->priority == child_node->parent->rr_prio)
			continue;
		return child_node->hw_id - child_node->priority;
	}

	return 0;
}
static struct otx2_nix_tm_shaper_profile *
nix_tm_shaper_profile_search(struct otx2_eth_dev *dev, uint32_t shaper_id)
{
	struct otx2_nix_tm_shaper_profile *tm_shaper_profile;

	TAILQ_FOREACH(tm_shaper_profile, &dev->shaper_profile_list, shaper) {
		if (tm_shaper_profile->shaper_profile_id == shaper_id)
			return tm_shaper_profile;
	}

	return NULL;
}
static inline uint64_t
shaper_rate_to_nix(uint64_t value, uint64_t *exponent_p,
		   uint64_t *mantissa_p, uint64_t *div_exp_p)
{
	uint64_t div_exp, exponent, mantissa;

	/* Boundary checks */
	if (value < MIN_SHAPER_RATE ||
	    value > MAX_SHAPER_RATE)
		return 0;

	if (value <= SHAPER_RATE(0, 0, 0)) {
		/* Calculate rate div_exp and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * (256 + mantissa)
		 *              / ((1 << div_exp) * 256))
		 */
		div_exp = 0;
		exponent = 0;
		mantissa = MAX_RATE_MANTISSA;

		while (value < (NIX_SHAPER_RATE_CONST / (1 << div_exp)))
			div_exp += 1;

		while (value <
		       ((NIX_SHAPER_RATE_CONST * (256 + mantissa)) /
			((1 << div_exp) * 256)))
			mantissa -= 1;
	} else {
		/* Calculate rate exponent and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
		 */
		div_exp = 0;
		exponent = MAX_RATE_EXPONENT;
		mantissa = MAX_RATE_MANTISSA;

		while (value < (NIX_SHAPER_RATE_CONST * (1 << exponent)))
			exponent -= 1;

		while (value < ((NIX_SHAPER_RATE_CONST *
				 ((256 + mantissa) << exponent)) / 256))
			mantissa -= 1;
	}

	if (div_exp > MAX_RATE_DIV_EXP ||
	    exponent > MAX_RATE_EXPONENT || mantissa > MAX_RATE_MANTISSA)
		return 0;

	if (div_exp_p)
		*div_exp_p = div_exp;
	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	/* Calculate real rate value */
	return SHAPER_RATE(exponent, mantissa, div_exp);
}
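/* Worked example, assuming NIX_SHAPER_RATE_CONST == 2E6 and
 * MAX_RATE_MANTISSA == 0xff: value = 1000000 takes the div_exp branch
 * since it is below SHAPER_RATE(0, 0, 0) == 2E6. The first loop settles
 * at div_exp = 1 (2E6 / 2 == 1E6), the second walks mantissa down from
 * 255 to 0, and SHAPER_RATE(0, 0, 1) == 2E6 * 256 / (2 * 256) returns
 * exactly 1000000.
 */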
static inline uint64_t
shaper_burst_to_nix(uint64_t value, uint64_t *exponent_p,
		    uint64_t *mantissa_p)
{
	uint64_t exponent, mantissa;

	if (value < MIN_SHAPER_BURST || value > MAX_SHAPER_BURST)
		return 0;

	/* Calculate burst exponent and mantissa using
	 * the following formula:
	 *
	 * value = ((256 + mantissa) << (exponent + 1)) / 256
	 */
	exponent = MAX_BURST_EXPONENT;
	mantissa = MAX_BURST_MANTISSA;

	while (value < (1ull << (exponent + 1)))
		exponent -= 1;

	while (value < ((256 + mantissa) << (exponent + 1)) / 256)
		mantissa -= 1;

	if (exponent > MAX_BURST_EXPONENT || mantissa > MAX_BURST_MANTISSA)
		return 0;

	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	return SHAPER_BURST(exponent, mantissa);
}
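/* Worked example, assuming MAX_BURST_EXPONENT == 0xf and
 * MAX_BURST_MANTISSA == 0xff: value = 65536 (64KB) leaves exponent at
 * 15, since 65536 is not below 1 << 16, then walks mantissa down to 0
 * because ((256 + 0) << 16) / 256 == 65536. SHAPER_BURST(15, 0)
 * reproduces the requested burst exactly.
 */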
static int
shaper_config_to_nix(struct otx2_nix_tm_shaper_profile *profile,
		     struct shaper_params *cir,
		     struct shaper_params *pir)
{
	struct rte_tm_shaper_params *param;

	if (!profile)
		return 0;

	param = &profile->params;

	/* Calculate CIR exponent and mantissa */
	if (param->committed.rate)
		cir->rate = shaper_rate_to_nix(param->committed.rate,
					       &cir->exponent,
					       &cir->mantissa,
					       &cir->div_exp);

	/* Calculate PIR exponent and mantissa */
	if (param->peak.rate)
		pir->rate = shaper_rate_to_nix(param->peak.rate,
					       &pir->exponent,
					       &pir->mantissa,
					       &pir->div_exp);

	/* Calculate CIR burst exponent and mantissa */
	if (param->committed.size)
		cir->burst = shaper_burst_to_nix(param->committed.size,
						 &cir->burst_exponent,
						 &cir->burst_mantissa);

	/* Calculate PIR burst exponent and mantissa */
	if (param->peak.size)
		pir->burst = shaper_burst_to_nix(param->peak.size,
						 &pir->burst_exponent,
						 &pir->burst_mantissa);

	return 0;
}
static int
populate_tm_tl1_default(struct otx2_eth_dev *dev, uint32_t schq)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_txschq_config *req;

	/*
	 * Default config for TL1.
	 * For VF this is always ignored.
	 */

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_TL1;

	/* Set DWRR quantum */
	req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
	req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;

	req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
	req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);

	req->reg[2] = NIX_AF_TL1X_CIR(schq);
	req->regval[2] = 0;

	req->num_regs = 3;

	return otx2_mbox_process(mbox);
}
static uint8_t
prepare_tm_sched_reg(struct otx2_eth_dev *dev,
		     struct otx2_nix_tm_node *tm_node,
		     volatile uint64_t *reg, volatile uint64_t *regval)
{
	uint64_t strict_prio = tm_node->priority;
	uint32_t hw_lvl = tm_node->hw_lvl;
	uint32_t schq = tm_node->hw_id;
	uint64_t rr_quantum;
	uint8_t k = 0;

	rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);

	/* For children of the root, strict priority is the default if
	 * either the device root is TL2 or TL1 Static Priority is disabled.
	 */
	if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
	    (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
	     dev->tm_flags & NIX_TM_TL1_NO_SP))
		strict_prio = TXSCH_TL1_DFLT_RR_PRIO;

	otx2_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
		    "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
		    nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
		    tm_node->id, strict_prio, rr_quantum, tm_node);

	switch (hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;
		break;
	case NIX_TXSCH_LVL_TL3:
		reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;
		break;
	case NIX_TXSCH_LVL_TL2:
		reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;
		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
		regval[k] = rr_quantum;
		k++;
		break;
	}

	return k;
}
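/* Per the packing above, each *_SCHEDULE value carries the DWRR quantum
 * in its low 24 bits and the strict priority in bits [27:24]; TL1 is the
 * exception and takes only a quantum. Bit positions are inferred from the
 * shifts in this function, not quoted from the hardware manual.
 */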
static uint8_t
prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
		      struct otx2_nix_tm_shaper_profile *profile,
		      volatile uint64_t *reg, volatile uint64_t *regval)
{
	struct shaper_params cir, pir;
	uint32_t schq = tm_node->hw_id;
	uint8_t k = 0;

	memset(&cir, 0, sizeof(cir));
	memset(&pir, 0, sizeof(pir));
	shaper_config_to_nix(profile, &cir, &pir);

	otx2_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
		    "pir %" PRIu64 "(%" PRIu64 "B),"
		    " cir %" PRIu64 "(%" PRIu64 "B) (%p)",
		    nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
		    tm_node->id, pir.rate, pir.burst,
		    cir.rate, cir.burst, tm_node);

	switch (tm_node->hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_MDQX_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				(shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_MDQX_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				(shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED ALG */
		reg[k] = NIX_AF_MDQX_SHAPE(schq);
		regval[k] = ((uint64_t)tm_node->red_algo << 9);
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL4X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				(shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_TL4X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				(shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL4X_SHAPE(schq);
		regval[k] = ((uint64_t)tm_node->red_algo << 9);
		k++;
		break;
	case NIX_TXSCH_LVL_TL3:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL3X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				(shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_TL3X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				(shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL3X_SHAPE(schq);
		regval[k] = ((uint64_t)tm_node->red_algo << 9);
		k++;
		break;
	case NIX_TXSCH_LVL_TL2:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL2X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				(shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_TL2X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				(shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL2X_SHAPE(schq);
		regval[k] = ((uint64_t)tm_node->red_algo << 9);
		k++;
		break;
	case NIX_TXSCH_LVL_TL1:
		/* Configure CIR */
		reg[k] = NIX_AF_TL1X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				(shaper2regval(&cir) | 1) : 0;
		k++;
		break;
	}

	return k;
}
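/* Bit 0 of each PIR/CIR value written above acts as the shaper enable:
 * a profile with a zero rate or burst leaves the register cleared and
 * that shaper off. The RED algorithm select position (red_algo << 9) is
 * likewise inferred from this function rather than the hardware manual.
 */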
static int
populate_tm_reg(struct otx2_eth_dev *dev,
		struct otx2_nix_tm_node *tm_node)
{
	struct otx2_nix_tm_shaper_profile *profile;
	uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
	uint64_t regval[MAX_REGS_PER_MBOX_MSG];
	uint64_t reg[MAX_REGS_PER_MBOX_MSG];
	struct otx2_mbox *mbox = dev->mbox;
	uint64_t parent = 0, child = 0;
	uint32_t hw_lvl, rr_prio, schq;
	struct nix_txschq_config *req;
	int rc = -EFAULT;
	uint8_t k = 0;

	memset(regval_mask, 0, sizeof(regval_mask));
	profile = nix_tm_shaper_profile_search(dev,
					       tm_node->params.shaper_profile_id);
	rr_prio = tm_node->rr_prio;
	hw_lvl = tm_node->hw_lvl;
	schq = tm_node->hw_id;

	/* Root node will not have a parent node */
	if (hw_lvl == dev->otx2_tm_root_lvl)
		parent = tm_node->parent_hw_id;
	else
		parent = tm_node->parent->hw_id;

	/* When the root is TL2, its TL1 parent is not managed as a tm
	 * node, so give that TL1 queue its default config here.
	 */
	if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
	    hw_lvl == dev->otx2_tm_root_lvl) {
		rc = populate_tm_tl1_default(dev, parent);
		if (rc)
			goto error;
	}

	if (hw_lvl != NIX_TXSCH_LVL_SMQ)
		child = find_prio_anchor(dev, tm_node->id);

	/* Override default rr_prio when TL1
	 * Static Priority is disabled
	 */
	if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
	    dev->tm_flags & NIX_TM_TL1_NO_SP) {
		rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
		child = 0;
	}

	otx2_tm_dbg("Topology config node %s(%u)->%s(%"PRIu64") lvl %u, id %u"
		    " prio_anchor %"PRIu64" rr_prio %u (%p)",
		    nix_hwlvl2str(hw_lvl), schq, nix_hwlvl2str(hw_lvl + 1),
		    parent, tm_node->lvl, tm_node->id, child, rr_prio, tm_node);

	/* Prepare Topology and Link config */
	switch (hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:

		/* Set xoff which will be cleared later */
		reg[k] = NIX_AF_SMQX_CFG(schq);
		regval[k] = BIT_ULL(50);
		regval_mask[k] = ~BIT_ULL(50);
		k++;

		/* Parent and schedule conf */
		reg[k] = NIX_AF_MDQX_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		break;
	case NIX_TXSCH_LVL_TL4:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL4X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Configure TL4 to send to SDP channel instead of CGX/LBK */
		if (otx2_dev_is_sdp(dev)) {
			reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
			regval[k] = BIT_ULL(12);
			k++;
		}
		break;
	case NIX_TXSCH_LVL_TL3:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL3X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Link configuration */
		if (!otx2_dev_is_sdp(dev) &&
		    dev->link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
							   nix_get_link(dev));
			regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
			k++;
		}

		break;
	case NIX_TXSCH_LVL_TL2:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL2X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Link configuration */
		if (!otx2_dev_is_sdp(dev) &&
		    dev->link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
							   nix_get_link(dev));
			regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
			k++;
		}

		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
		k++;

		break;
	}

	/* Prepare schedule config */
	k += prepare_tm_sched_reg(dev, tm_node, &reg[k], &regval[k]);

	/* Prepare shaping config */
	k += prepare_tm_shaper_reg(tm_node, profile, &reg[k], &regval[k]);

	if (!k)
		return 0;

	/* Copy and send config mbox */
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = hw_lvl;
	req->num_regs = k;

	otx2_mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
	otx2_mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
	otx2_mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);

	rc = otx2_mbox_process(mbox);
	if (rc)
		goto error;

	return 0;
error:
	otx2_err("Txschq cfg request failed for node %p, rc=%d", tm_node, rc);
	return rc;
}
static int
nix_tm_txsch_reg_config(struct otx2_eth_dev *dev)
{
	struct otx2_nix_tm_node *tm_node;
	uint32_t hw_lvl;
	int rc = 0;

	for (hw_lvl = 0; hw_lvl <= dev->otx2_tm_root_lvl; hw_lvl++) {
		TAILQ_FOREACH(tm_node, &dev->node_list, node) {
			if (tm_node->hw_lvl == hw_lvl &&
			    tm_node->hw_lvl != NIX_TXSCH_LVL_CNT) {
				rc = populate_tm_reg(dev, tm_node);
				if (rc)
					goto exit;
			}
		}
	}

exit:
	return rc;
}
static struct otx2_nix_tm_node *
nix_tm_node_search(struct otx2_eth_dev *dev,
		   uint32_t node_id, bool user)
{
	struct otx2_nix_tm_node *tm_node;

	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
		if (tm_node->id == node_id &&
		    (user == !!(tm_node->flags & NIX_TM_NODE_USER)))
			return tm_node;
	}

	return NULL;
}
static uint16_t
check_rr(struct otx2_eth_dev *dev, uint32_t priority, uint32_t parent_id)
{
	struct otx2_nix_tm_node *tm_node;
	uint16_t rr_num = 0;

	/* Count the children of this parent that share the given priority,
	 * i.e. the size of the round-robin group.
	 */
	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
		if (!tm_node->parent)
			continue;

		if (tm_node->parent->id != parent_id)
			continue;

		if (tm_node->priority == priority)
			rr_num++;
	}

	return rr_num;
}
static int
nix_tm_update_parent_info(struct otx2_eth_dev *dev)
{
	struct otx2_nix_tm_node *tm_node_child;
	struct otx2_nix_tm_node *tm_node;
	struct otx2_nix_tm_node *parent;
	uint32_t rr_num;
	uint32_t priority;

	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
		if (!tm_node->parent)
			continue;
		/* Count the group of children with the same priority,
		 * i.e. the RR group under this parent.
		 */
		parent = tm_node->parent;
		priority = tm_node->priority;
		rr_num = check_rr(dev, priority, parent->id);

		/* Assuming that multiple RR groups are
		 * not configured, as per capability.
		 */
		if (rr_num > 1) {
			parent->rr_prio = priority;
			parent->rr_num = rr_num;
		}

		/* Find out static priority children that are not in RR */
		TAILQ_FOREACH(tm_node_child, &dev->node_list, node) {
			if (!tm_node_child->parent)
				continue;
			if (parent->id != tm_node_child->parent->id)
				continue;
			if (parent->max_prio == UINT32_MAX &&
			    tm_node_child->priority != parent->rr_prio)
				parent->max_prio = 0;

			if (parent->max_prio < tm_node_child->priority &&
			    parent->rr_prio != tm_node_child->priority)
				parent->max_prio = tm_node_child->priority;
		}
	}

	return 0;
}
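/* Example: a parent whose children have priorities {0, 1, 1, 2} ends up
 * with rr_prio = 1 and rr_num = 2 (the two priority-1 children form the
 * RR group), while max_prio = 2 tracks the highest strict-priority child.
 */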
static int
nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,
			uint32_t parent_node_id, uint32_t priority,
			uint32_t weight, uint16_t hw_lvl,
			uint16_t lvl, bool user,
			struct rte_tm_node_params *params)
{
	struct otx2_nix_tm_shaper_profile *shaper_profile;
	struct otx2_nix_tm_node *tm_node, *parent_node;
	uint32_t shaper_profile_id;

	shaper_profile_id = params->shaper_profile_id;
	shaper_profile = nix_tm_shaper_profile_search(dev, shaper_profile_id);

	parent_node = nix_tm_node_search(dev, parent_node_id, user);

	tm_node = rte_zmalloc("otx2_nix_tm_node",
			      sizeof(struct otx2_nix_tm_node), 0);
	if (!tm_node)
		return -ENOMEM;

	tm_node->lvl = lvl;
	tm_node->hw_lvl = hw_lvl;

	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->rr_prio = 0xf;
	tm_node->max_prio = UINT32_MAX;
	tm_node->hw_id = UINT32_MAX;
	tm_node->flags = 0;
	if (user)
		tm_node->flags = NIX_TM_NODE_USER;
	rte_memcpy(&tm_node->params, params,
		   sizeof(struct rte_tm_node_params));

	if (shaper_profile)
		shaper_profile->reference_count++;
	tm_node->parent = parent_node;
	tm_node->parent_hw_id = UINT32_MAX;

	TAILQ_INSERT_TAIL(&dev->node_list, tm_node, node);

	return 0;
}
static int
nix_tm_clear_shaper_profiles(struct otx2_eth_dev *dev)
{
	struct otx2_nix_tm_shaper_profile *shaper_profile;

	while ((shaper_profile = TAILQ_FIRST(&dev->shaper_profile_list))) {
		if (shaper_profile->reference_count)
			otx2_tm_dbg("Shaper profile %u has non-zero references",
				    shaper_profile->shaper_profile_id);
		TAILQ_REMOVE(&dev->shaper_profile_list, shaper_profile,
			     shaper);
		rte_free(shaper_profile);
	}

	return 0;
}
static int
nix_smq_xoff(struct otx2_eth_dev *dev, uint16_t smq, bool enable)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_txschq_config *req;

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_SMQ;
	req->num_regs = 1;

	req->reg[0] = NIX_AF_SMQX_CFG(smq);
	/* Unmodified fields */
	req->regval[0] = ((uint64_t)NIX_MAX_VTAG_INS << 36) |
				(NIX_MAX_HW_FRS << 8) | NIX_MIN_HW_FRS;

	if (enable)
		req->regval[0] |= BIT_ULL(50) | BIT_ULL(49);

	return otx2_mbox_process(mbox);
}
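/* Note: bits 50 and 49 are set together here to xoff and flush the SMQ;
 * the field meanings are inferred from how this file uses them, not from
 * the hardware manual. populate_tm_reg() sets only bit 50 (with a mask),
 * relying on a later nix_smq_xoff(dev, smq, false) to clear it once the
 * topology is in place.
 */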
int
otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable)
{
	struct otx2_eth_txq *txq = __txq;
	struct npa_aq_enq_req *req;
	struct npa_aq_enq_rsp *rsp;
	struct otx2_npa_lf *lf;
	struct otx2_mbox *mbox;
	uint64_t aura_handle;
	int rc;

	lf = otx2_npa_lf_obj_get();
	if (!lf)
		return -EFAULT;
	mbox = lf->mbox;
	/* Set/clear sqb aura fc_ena */
	aura_handle = txq->sqb_pool->pool_id;
	req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);

	req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_WRITE;
	/* Below is not needed for aura writes but the AF driver needs it;
	 * AF will translate it to the associated pool context.
	 */
	req->aura.pool_addr = req->aura_id;

	req->aura.fc_ena = enable;
	req->aura_mask.fc_ena = 1;

	rc = otx2_mbox_process(mbox);
	if (rc)
		return rc;

	/* Read back npa aura ctx */
	req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);

	req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_READ;

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Init when enabled as there might be no triggers */
	if (enable)
		*(volatile uint64_t *)txq->fc_mem = rsp->aura.count;
	else
		*(volatile uint64_t *)txq->fc_mem = txq->nb_sqb_bufs;
	/* Sync write barrier */
	rte_wmb();

	return 0;
}
static void
nix_txq_flush_sq_spin(struct otx2_eth_txq *txq)
{
	uint16_t sqb_cnt, head_off, tail_off;
	struct otx2_eth_dev *dev = txq->dev;
	uint16_t sq = txq->sq;
	uint64_t reg, val;
	int64_t *regaddr;

	while (true) {
		reg = ((uint64_t)sq << 32);
		regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);
		val = otx2_atomic64_add_nosync(reg, regaddr);

		regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS);
		val = otx2_atomic64_add_nosync(reg, regaddr);
		sqb_cnt = val & 0xFFFF;
		head_off = (val >> 20) & 0x3F;
		tail_off = (val >> 28) & 0x3F;

		/* SQ reached quiescent state */
		if (sqb_cnt <= 1 && head_off == tail_off &&
		    (*txq->fc_mem == txq->nb_sqb_bufs)) {
			break;
		}

		rte_pause();
	}
}
int
otx2_nix_tm_sw_xoff(void *__txq, bool dev_started)
{
	struct otx2_eth_txq *txq = __txq;
	struct otx2_eth_dev *dev = txq->dev;
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *req;
	struct nix_aq_enq_rsp *rsp;
	uint16_t smq;
	int rc;

	/* Get smq from sq */
	req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	req->qidx = txq->sq;
	req->ctype = NIX_AQ_CTYPE_SQ;
	req->op = NIX_AQ_INSTOP_READ;
	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		otx2_err("Failed to get smq, rc=%d", rc);
		return -EIO;
	}

	/* Check if sq is enabled */
	if (!rsp->sq.ena)
		return 0;

	smq = rsp->sq.smq;

	/* Enable CGX RXTX to drain pkts */
	if (!dev_started) {
		rc = otx2_cgx_rxtx_start(dev);
		if (rc)
			return rc;
	}

	rc = otx2_nix_sq_sqb_aura_fc(txq, false);
	if (rc < 0) {
		otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
		goto cleanup;
	}

	/* Disable smq xoff for case it was enabled earlier */
	rc = nix_smq_xoff(dev, smq, false);
	if (rc) {
		otx2_err("Failed to enable smq for sq %u, rc=%d", txq->sq, rc);
		goto cleanup;
	}

	/* Wait for sq entries to be flushed */
	nix_txq_flush_sq_spin(txq);

	/* Flush and enable smq xoff */
	rc = nix_smq_xoff(dev, smq, true);
	if (rc) {
		otx2_err("Failed to disable smq for sq %u, rc=%d", txq->sq, rc);
		return rc;
	}

cleanup:
	/* Restore cgx state */
	if (!dev_started)
		rc |= otx2_cgx_rxtx_stop(dev);

	return rc;
}
static int
nix_tm_sw_xon(struct otx2_eth_txq *txq,
	      uint16_t smq, uint32_t rr_quantum)
{
	struct otx2_eth_dev *dev = txq->dev;
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *req;
	int rc;

	otx2_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum %u",
		    txq->sq, smq, rr_quantum);
	/* Set smq from sq */
	req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	req->qidx = txq->sq;
	req->ctype = NIX_AQ_CTYPE_SQ;
	req->op = NIX_AQ_INSTOP_WRITE;
	req->sq.smq = smq;
	req->sq.smq_rr_quantum = rr_quantum;
	req->sq_mask.smq = ~req->sq_mask.smq;
	req->sq_mask.smq_rr_quantum = ~req->sq_mask.smq_rr_quantum;

	rc = otx2_mbox_process(mbox);
	if (rc) {
		otx2_err("Failed to set smq, rc=%d", rc);
		return -EIO;
	}

	/* Enable sqb_aura fc */
	rc = otx2_nix_sq_sqb_aura_fc(txq, true);
	if (rc < 0) {
		otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
		return rc;
	}

	/* Disable smq xoff */
	rc = nix_smq_xoff(dev, smq, false);
	if (rc) {
		otx2_err("Failed to enable smq for sq %u", txq->sq);
		return rc;
	}

	return 0;
}
static int
nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask,
		      uint32_t flags, bool hw_only)
{
	struct otx2_nix_tm_shaper_profile *shaper_profile;
	struct otx2_nix_tm_node *tm_node, *next_node;
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_txsch_free_req *req;
	uint32_t shaper_profile_id;
	bool skip_node = false;
	int rc = 0;

	next_node = TAILQ_FIRST(&dev->node_list);
	while (next_node) {
		tm_node = next_node;
		next_node = TAILQ_NEXT(tm_node, node);

		/* Check for only requested nodes */
		if ((tm_node->flags & flags_mask) != flags)
			continue;

		if (nix_tm_have_tl1_access(dev) &&
		    tm_node->hw_lvl == NIX_TXSCH_LVL_TL1)
			skip_node = true;

		otx2_tm_dbg("Free hwres for node %u, hwlvl %u, hw_id %u (%p)",
			    tm_node->id, tm_node->hw_lvl,
			    tm_node->hw_id, tm_node);
		/* Free specific HW resource if requested */
		if (!skip_node && flags_mask &&
		    tm_node->flags & NIX_TM_NODE_HWRES) {
			req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
			req->flags = 0;
			req->schq_lvl = tm_node->hw_lvl;
			req->schq = tm_node->hw_id;
			rc = otx2_mbox_process(mbox);
			if (rc)
				break;
		} else {
			skip_node = false;
		}
		tm_node->flags &= ~NIX_TM_NODE_HWRES;

		/* Leave software elements if needed */
		if (hw_only)
			continue;

		shaper_profile_id = tm_node->params.shaper_profile_id;
		shaper_profile =
			nix_tm_shaper_profile_search(dev, shaper_profile_id);
		if (shaper_profile)
			shaper_profile->reference_count--;

		TAILQ_REMOVE(&dev->node_list, tm_node, node);
		rte_free(tm_node);
	}

	if (!flags_mask) {
		/* Free all hw resources */
		req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
		req->flags = TXSCHQ_FREE_ALL;

		return otx2_mbox_process(mbox);
	}

	return rc;
}
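/* Usage note: a zero flags_mask frees every software node and then all
 * hardware scheduler queues in one TXSCHQ_FREE_ALL mailbox call; this is
 * how otx2_nix_tm_init_default() and otx2_nix_tm_fini() reset the tree.
 * hw_only == true releases hardware resources but keeps the software
 * node list intact.
 */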
static uint8_t
nix_tm_copy_rsp_to_dev(struct otx2_eth_dev *dev,
		       struct nix_txsch_alloc_rsp *rsp)
{
	uint16_t schq;
	uint8_t lvl;

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) {
			dev->txschq_list[lvl][schq] = rsp->schq_list[lvl][schq];
			dev->txschq_contig_list[lvl][schq] =
				rsp->schq_contig_list[lvl][schq];
		}

		dev->txschq[lvl] = rsp->schq[lvl];
		dev->txschq_contig[lvl] = rsp->schq_contig[lvl];
	}

	return 0;
}
static void
nix_tm_assign_id_to_node(struct otx2_eth_dev *dev,
			 struct otx2_nix_tm_node *child,
			 struct otx2_nix_tm_node *parent)
{
	uint32_t hw_id, schq_con_index, prio_offset;
	uint32_t l_id, schq_index;

	otx2_tm_dbg("Assign hw id for child node %u, lvl %u, hw_lvl %u (%p)",
		    child->id, child->lvl, child->hw_lvl, child);

	child->flags |= NIX_TM_NODE_HWRES;

	/* Process root nodes */
	if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
	    child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
		uint32_t tschq_con_index;
		uint32_t idx;

		l_id = child->hw_lvl;
		tschq_con_index = dev->txschq_contig_index[l_id];
		hw_id = dev->txschq_contig_list[l_id][tschq_con_index];
		child->hw_id = hw_id;
		dev->txschq_contig_index[l_id]++;
		/* Update TL1 hw_id for its parent for config purpose */
		idx = dev->txschq_index[NIX_TXSCH_LVL_TL1]++;
		hw_id = dev->txschq_list[NIX_TXSCH_LVL_TL1][idx];
		child->parent_hw_id = hw_id;
		return;
	}
	if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL1 &&
	    child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
		uint32_t tschq_con_index;

		l_id = child->hw_lvl;
		tschq_con_index = dev->txschq_index[l_id];
		hw_id = dev->txschq_list[l_id][tschq_con_index];
		child->hw_id = hw_id;
		dev->txschq_index[l_id]++;
		return;
	}

	/* Process children with parents */
	l_id = child->hw_lvl;
	schq_index = dev->txschq_index[l_id];
	schq_con_index = dev->txschq_contig_index[l_id];

	if (child->priority == parent->rr_prio) {
		hw_id = dev->txschq_list[l_id][schq_index];
		child->hw_id = hw_id;
		child->parent_hw_id = parent->hw_id;
		dev->txschq_index[l_id]++;
	} else {
		prio_offset = schq_con_index + child->priority;
		hw_id = dev->txschq_contig_list[l_id][prio_offset];
		child->hw_id = hw_id;
	}
}
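/* Allocation pattern implied above: children in the parent's RR group
 * draw ids from the non-contiguous list, while strict-priority children
 * index the contiguous list by priority, so consecutive priorities land
 * on consecutive hardware queues.
 */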
static int
nix_tm_assign_hw_id(struct otx2_eth_dev *dev)
{
	struct otx2_nix_tm_node *parent, *child;
	uint32_t child_hw_lvl, con_index_inc, i;

	for (i = NIX_TXSCH_LVL_TL1; i > 0; i--) {
		TAILQ_FOREACH(parent, &dev->node_list, node) {
			child_hw_lvl = parent->hw_lvl - 1;
			if (parent->hw_lvl != i)
				continue;
			TAILQ_FOREACH(child, &dev->node_list, node) {
				if (!child->parent)
					continue;
				if (child->parent->id != parent->id)
					continue;
				nix_tm_assign_id_to_node(dev, child, parent);
			}

			con_index_inc = parent->max_prio + 1;
			dev->txschq_contig_index[child_hw_lvl] += con_index_inc;

			/*
			 * Explicitly assign an id to the parent node if it
			 * doesn't have a parent of its own (i.e. it is the
			 * root).
			 */
			if (parent->hw_lvl == dev->otx2_tm_root_lvl)
				nix_tm_assign_id_to_node(dev, parent, NULL);
		}
	}

	return 0;
}
static int
nix_tm_count_req_schq(struct otx2_eth_dev *dev,
		      struct nix_txsch_alloc_req *req, uint8_t lvl)
{
	struct otx2_nix_tm_node *tm_node;
	uint8_t contig_count;

	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
		if (lvl == tm_node->hw_lvl) {
			req->schq[lvl - 1] += tm_node->rr_num;
			if (tm_node->max_prio != UINT32_MAX) {
				contig_count = tm_node->max_prio + 1;
				req->schq_contig[lvl - 1] += contig_count;
			}
		}
		if (lvl == dev->otx2_tm_root_lvl &&
		    dev->otx2_tm_root_lvl && lvl == NIX_TXSCH_LVL_TL2 &&
		    tm_node->hw_lvl == dev->otx2_tm_root_lvl) {
			req->schq_contig[dev->otx2_tm_root_lvl]++;
		}
	}

	req->schq[NIX_TXSCH_LVL_TL1] = 1;
	req->schq_contig[NIX_TXSCH_LVL_TL1] = 0;

	return 0;
}
static int
nix_tm_prepare_txschq_req(struct otx2_eth_dev *dev,
			  struct nix_txsch_alloc_req *req)
{
	uint8_t i;

	for (i = NIX_TXSCH_LVL_TL1; i > 0; i--)
		nix_tm_count_req_schq(dev, req, i);

	for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
		dev->txschq_index[i] = 0;
		dev->txschq_contig_index[i] = 0;
	}
	return 0;
}
static int
nix_tm_send_txsch_alloc_msg(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	int rc;

	/* Alloc the resources needed by the configured topology */
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(mbox);

	rc = nix_tm_prepare_txschq_req(dev, req);
	if (rc)
		return rc;

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	nix_tm_copy_rsp_to_dev(dev, rsp);
	dev->link_cfg_lvl = rsp->link_cfg_lvl;

	nix_tm_assign_hw_id(dev);
	return 0;
}
static int
nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_nix_tm_node *tm_node;
	uint16_t sq, smq, rr_quantum;
	struct otx2_eth_txq *txq;
	int rc;

	nix_tm_update_parent_info(dev);

	rc = nix_tm_send_txsch_alloc_msg(dev);
	if (rc) {
		otx2_err("TM failed to alloc tm resources=%d", rc);
		return rc;
	}

	rc = nix_tm_txsch_reg_config(dev);
	if (rc) {
		otx2_err("TM failed to configure sched registers=%d", rc);
		return rc;
	}

	/* Enable xmit as all the topology is ready */
	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
		if (tm_node->flags & NIX_TM_NODE_ENABLED)
			continue;

		/* Enable xmit on sq */
		if (tm_node->lvl != OTX2_TM_LVL_QUEUE) {
			tm_node->flags |= NIX_TM_NODE_ENABLED;
			continue;
		}

		/* Don't enable SMQ or mark it as enabled */
		if (!xmit_enable)
			continue;

		sq = tm_node->id;
		if (sq >= eth_dev->data->nb_tx_queues) {
			rc = -EFAULT;
			break;
		}

		txq = eth_dev->data->tx_queues[sq];

		smq = tm_node->parent->hw_id;
		rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);

		rc = nix_tm_sw_xon(txq, smq, rr_quantum);
		if (rc)
			break;
		tm_node->flags |= NIX_TM_NODE_ENABLED;
	}

	if (rc)
		otx2_err("TM failed to enable xmit on sq %u, rc=%d", sq, rc);

	return rc;
}
static int
nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint32_t def = eth_dev->data->nb_tx_queues;
	struct rte_tm_node_params params;
	uint32_t leaf_parent, i;
	int rc = 0;

	/* Default params */
	memset(&params, 0, sizeof(params));
	params.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;

	if (nix_tm_have_tl1_access(dev)) {
		dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
		rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
					     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL1,
					     OTX2_TM_LVL_ROOT, false, &params);
		if (rc)
			goto exit;
		rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
					     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL2,
					     OTX2_TM_LVL_SCH1, false, &params);
		if (rc)
			goto exit;
		rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
					     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL3,
					     OTX2_TM_LVL_SCH2, false, &params);
		if (rc)
			goto exit;
		rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
					     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL4,
					     OTX2_TM_LVL_SCH3, false, &params);
		if (rc)
			goto exit;
		rc = nix_tm_node_add_to_list(dev, def + 4, def + 3, 0,
					     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_SMQ,
					     OTX2_TM_LVL_SCH4, false, &params);
		if (rc)
			goto exit;
		leaf_parent = def + 4;
	} else {
		dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
		rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
					     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL2,
					     OTX2_TM_LVL_ROOT, false, &params);
		if (rc)
			goto exit;
		rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
					     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL3,
					     OTX2_TM_LVL_SCH1, false, &params);
		if (rc)
			goto exit;
		rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
					     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL4,
					     OTX2_TM_LVL_SCH2, false, &params);
		if (rc)
			goto exit;
		rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
					     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_SMQ,
					     OTX2_TM_LVL_SCH3, false, &params);
		if (rc)
			goto exit;
		leaf_parent = def + 3;
	}

	/* Add leaf nodes */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		rc = nix_tm_node_add_to_list(dev, i, leaf_parent, 0,
					     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_CNT,
					     OTX2_TM_LVL_QUEUE, false, &params);
		if (rc)
			break;
	}

exit:
	return rc;
}
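/* Default tree shape with TL1 access (without it the root starts at TL2
 * and one SCH level drops out):
 *
 *   TL1 (root) -> TL2 -> TL3 -> TL4 -> SMQ -> SQ[0..nb_tx_queues-1]
 *
 * Internal node ids start at def == nb_tx_queues, so they never collide
 * with leaf ids, which reuse the tx queue numbers.
 */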
void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	TAILQ_INIT(&dev->node_list);
	TAILQ_INIT(&dev->shaper_profile_list);
}
int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
	int rc;

	/* Free up all resources already held */
	rc = nix_tm_free_resources(dev, 0, 0, false);
	if (rc) {
		otx2_err("Failed to free up existing resources, rc=%d", rc);
		return rc;
	}

	/* Clear shaper profiles */
	nix_tm_clear_shaper_profiles(dev);
	dev->tm_flags = NIX_TM_DEFAULT_TREE;

	/* Disable TL1 Static Priority when VFs are enabled, as the PF
	 * would otherwise need to reallocate the VFs' TL2 nodes at
	 * runtime to support a specific PF topology.
	 */
	if (pci_dev->max_vfs)
		dev->tm_flags |= NIX_TM_TL1_NO_SP;

	rc = nix_tm_prepare_default_tree(eth_dev);
	if (rc != 0)
		return rc;

	rc = nix_tm_alloc_resources(eth_dev, false);
	if (rc != 0)
		return rc;
	dev->tm_leaf_cnt = sq_cnt;

	return 0;
}
int
otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	int rc;

	/* Xmit is assumed to be disabled */
	/* Free up resources already held */
	rc = nix_tm_free_resources(dev, 0, 0, false);
	if (rc) {
		otx2_err("Failed to free up existing resources, rc=%d", rc);
		return rc;
	}

	/* Clear shaper profiles */
	nix_tm_clear_shaper_profiles(dev);

	dev->tm_flags = 0;
	return 0;
}
int
otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
			  uint32_t *rr_quantum, uint16_t *smq)
{
	struct otx2_nix_tm_node *tm_node;
	int rc;

	/* 0..sq_cnt-1 are leaf nodes */
	if (sq >= dev->tm_leaf_cnt)
		return -ERANGE;

	/* Search for internal node first */
	tm_node = nix_tm_node_search(dev, sq, false);
	if (!tm_node)
		tm_node = nix_tm_node_search(dev, sq, true);

	/* Check if we found a valid leaf node */
	if (!tm_node || tm_node->lvl != OTX2_TM_LVL_QUEUE ||
	    !tm_node->parent || tm_node->parent->hw_id == UINT32_MAX)
		return -EIO;

	/* Get SMQ Id of leaf node's parent */
	*smq = tm_node->parent->hw_id;
	*rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);

	rc = nix_smq_xoff(dev, *smq, false);
	if (rc)
		return rc;
	tm_node->flags |= NIX_TM_NODE_ENABLED;

	return 0;
}