1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #include <rte_malloc.h>
7 #include "otx2_ethdev.h"
10 /* Use the last LVL_CNT node ids for the default nodes */
11 #define NIX_DEFAULT_NODE_ID_START (RTE_TM_NODE_ID_NULL - NIX_TXSCH_LVL_CNT)
13 enum otx2_tm_node_level {
24 uint64_t shaper2regval(struct shaper_params *shaper)
26 return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
27 (shaper->div_exp << 13) | (shaper->exponent << 9) |
28 (shaper->mantissa << 1);
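/*
 * Field layout implied by the packing above (an informal sketch derived
 * from the shifts, not a datasheet quote): bit 0 is left clear for the
 * enable bit callers OR in ("| 1"), the rate mantissa starts at bit 1,
 * the rate exponent at bit 9, div_exp at bit 13, the burst mantissa at
 * bit 29 and the burst exponent at bit 37; field widths follow the
 * MAX_RATE_* / MAX_BURST_* bounds used below.  For example, a shaper with
 * exponent = 1 and mantissa = 4 (all other fields zero) packs to
 * (1 << 9) | (4 << 1) = 0x208.
 */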
32 nix_get_link(struct otx2_eth_dev *dev)
34 int link = 13 /* SDP */;
38 lmac_chan = dev->tx_chan_base;
41 if (lmac_chan >= 0x800) {
42 map = lmac_chan & 0x7FF;
43 link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
44 } else if (lmac_chan < 0x700) {
53 nix_get_relchan(struct otx2_eth_dev *dev)
55 return dev->tx_chan_base & 0xff;
59 nix_tm_have_tl1_access(struct otx2_eth_dev *dev)
61 bool is_lbk = otx2_dev_is_lbk(dev);
62 return otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) &&
63 !is_lbk && !dev->maxvf;
67 find_prio_anchor(struct otx2_eth_dev *dev, uint32_t node_id)
69 struct otx2_nix_tm_node *child_node;
71 TAILQ_FOREACH(child_node, &dev->node_list, node) {
72 if (!child_node->parent)
74 if (child_node->parent->id != node_id)
76 if (child_node->priority == child_node->parent->rr_prio)
78 return child_node->hw_id - child_node->priority;
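/*
 * Illustrative note, based on the contiguous hw_id assignment done in
 * nix_tm_assign_id_to_node() below: strict-priority siblings receive
 * consecutive hardware ids indexed by their priority, so any non-RR child
 * lets us recover the base ("anchor") by subtracting its priority.  E.g.
 * if the priority-2 child of this parent holds hw_id 34, the anchor
 * returned here is 32, and the priority-0 slot is schq 32.
 */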
84 static struct otx2_nix_tm_shaper_profile *
85 nix_tm_shaper_profile_search(struct otx2_eth_dev *dev, uint32_t shaper_id)
87 struct otx2_nix_tm_shaper_profile *tm_shaper_profile;
89 TAILQ_FOREACH(tm_shaper_profile, &dev->shaper_profile_list, shaper) {
90 if (tm_shaper_profile->shaper_profile_id == shaper_id)
91 return tm_shaper_profile;
96 static inline uint64_t
97 shaper_rate_to_nix(uint64_t cclk_hz, uint64_t cclk_ticks,
98 uint64_t value, uint64_t *exponent_p,
99 uint64_t *mantissa_p, uint64_t *div_exp_p)
101 uint64_t div_exp, exponent, mantissa;
103 /* Boundary checks */
104 if (value < MIN_SHAPER_RATE(cclk_hz, cclk_ticks) ||
105 value > MAX_SHAPER_RATE(cclk_hz, cclk_ticks))
108 if (value <= SHAPER_RATE(cclk_hz, cclk_ticks, 0, 0, 0)) {
109 /* Calculate rate div_exp and mantissa using
110 * the following formula:
112 * value = (cclk_hz * (256 + mantissa)) /
113 *         ((cclk_ticks << div_exp) * 256)
117 mantissa = MAX_RATE_MANTISSA;
119 while (value < (cclk_hz / (cclk_ticks << div_exp)))
123 ((cclk_hz * (256 + mantissa)) /
124 ((cclk_ticks << div_exp) * 256)))
127 /* Calculate rate exponent and mantissa using
128 * the following formula:
130 * value = (cclk_hz * ((256 + mantissa) << exponent)) /
131 *         (cclk_ticks * 256)
135 exponent = MAX_RATE_EXPONENT;
136 mantissa = MAX_RATE_MANTISSA;
138 while (value < (cclk_hz * (1 << exponent)) / cclk_ticks)
141 while (value < (cclk_hz * ((256 + mantissa) << exponent)) /
146 if (div_exp > MAX_RATE_DIV_EXP ||
147 exponent > MAX_RATE_EXPONENT || mantissa > MAX_RATE_MANTISSA)
151 *div_exp_p = div_exp;
153 *exponent_p = exponent;
155 *mantissa_p = mantissa;
157 /* Calculate real rate value */
158 return SHAPER_RATE(cclk_hz, cclk_ticks, exponent, mantissa, div_exp);
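/*
 * A minimal standalone sketch of the exponent/mantissa fit performed in
 * shaper_rate_to_nix() above, useful for checking a pair by hand.  The
 * demo_* names and DEMO_* bounds are assumptions for illustration only
 * and need not match the driver's MAX_RATE_* macros or CCLK settings;
 * the div_exp handling for very low rates is omitted.
 */
#define DEMO_MAX_RATE_EXPONENT 0xf
#define DEMO_MAX_RATE_MANTISSA 0xff

/* Rate represented by an (exponent, mantissa) pair:
 * clk_hz * ((256 + mantissa) << exponent) / (clk_ticks * 256)
 */
static inline uint64_t
demo_shaper_rate(uint64_t clk_hz, uint64_t clk_ticks,
		 uint64_t exponent, uint64_t mantissa)
{
	return (clk_hz * ((256 + mantissa) << exponent)) / (clk_ticks * 256);
}

static inline int
demo_shaper_rate_fit(uint64_t clk_hz, uint64_t clk_ticks, uint64_t value,
		     uint64_t *exponent_p, uint64_t *mantissa_p)
{
	uint64_t exponent = DEMO_MAX_RATE_EXPONENT;
	uint64_t mantissa = DEMO_MAX_RATE_MANTISSA;

	/* Largest exponent whose mantissa-0 rate does not exceed value */
	while (exponent && value < demo_shaper_rate(clk_hz, clk_ticks,
						    exponent, 0))
		exponent -= 1;

	/* Largest mantissa keeping the represented rate at or below value */
	while (mantissa && value < demo_shaper_rate(clk_hz, clk_ticks,
						    exponent, mantissa))
		mantissa -= 1;

	if (value < demo_shaper_rate(clk_hz, clk_ticks, exponent, mantissa))
		return -1;	/* Requested rate below representable range */

	*exponent_p = exponent;
	*mantissa_p = mantissa;
	return 0;
}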
161 static inline uint64_t
162 lx_shaper_rate_to_nix(uint64_t cclk_hz, uint32_t hw_lvl,
163 uint64_t value, uint64_t *exponent,
164 uint64_t *mantissa, uint64_t *div_exp)
166 if (hw_lvl == NIX_TXSCH_LVL_TL1)
167 return shaper_rate_to_nix(cclk_hz, L1_TIME_WHEEL_CCLK_TICKS,
168 value, exponent, mantissa, div_exp);
170 return shaper_rate_to_nix(cclk_hz, LX_TIME_WHEEL_CCLK_TICKS,
171 value, exponent, mantissa, div_exp);
174 static inline uint64_t
175 shaper_burst_to_nix(uint64_t value, uint64_t *exponent_p,
176 uint64_t *mantissa_p)
178 uint64_t exponent, mantissa;
180 if (value < MIN_SHAPER_BURST || value > MAX_SHAPER_BURST)
183 /* Calculate burst exponent and mantissa using
184 * the following formula:
186 * value = ((256 + mantissa) << (exponent + 1)) / 256
190 exponent = MAX_BURST_EXPONENT;
191 mantissa = MAX_BURST_MANTISSA;
193 while (value < (1ull << (exponent + 1)))
196 while (value < ((256 + mantissa) << (exponent + 1)) / 256)
199 if (exponent > MAX_BURST_EXPONENT || mantissa > MAX_BURST_MANTISSA)
203 *exponent_p = exponent;
205 *mantissa_p = mantissa;
207 return SHAPER_BURST(exponent, mantissa);
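/*
 * Companion sketch for the burst fit above; demo_shaper_burst() is an
 * illustrative helper, not driver code.  Worked example (assuming an
 * exponent of 11 is within MAX_BURST_EXPONENT): a requested burst of 5000
 * resolves to exponent 11 and mantissa 56, i.e. a programmed burst of
 * ((256 + 56) << 12) / 256 = 4992.
 */
static inline uint64_t
demo_shaper_burst(uint64_t exponent, uint64_t mantissa)
{
	/* Same formula the loops above converge on */
	return ((256 + mantissa) << (exponent + 1)) / 256;
}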
211 configure_shaper_cir_pir_reg(struct otx2_eth_dev *dev,
212 struct otx2_nix_tm_node *tm_node,
213 struct shaper_params *cir,
214 struct shaper_params *pir)
216 uint32_t shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
217 struct otx2_nix_tm_shaper_profile *shaper_profile = NULL;
218 struct rte_tm_shaper_params *param;
220 shaper_profile_id = tm_node->params.shaper_profile_id;
222 shaper_profile = nix_tm_shaper_profile_search(dev, shaper_profile_id);
223 if (shaper_profile) {
224 param = &shaper_profile->profile;
225 /* Calculate CIR exponent and mantissa */
226 if (param->committed.rate)
227 cir->rate = lx_shaper_rate_to_nix(CCLK_HZ,
229 param->committed.rate,
234 /* Calculate PIR exponent and mantissa */
235 if (param->peak.rate)
236 pir->rate = lx_shaper_rate_to_nix(CCLK_HZ,
243 /* Calculate CIR burst exponent and mantissa */
244 if (param->committed.size)
245 cir->burst = shaper_burst_to_nix(param->committed.size,
246 &cir->burst_exponent,
247 &cir->burst_mantissa);
249 /* Calculate PIR burst exponent and mantissa */
250 if (param->peak.size)
251 pir->burst = shaper_burst_to_nix(param->peak.size,
252 &pir->burst_exponent,
253 &pir->burst_mantissa);
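/*
 * For reference, a caller-supplied profile that exercises both pairs of
 * conversions above could look like the following (illustrative values;
 * rte_tm rates are in bytes per second, sizes in bytes):
 *
 *	struct rte_tm_shaper_params profile = {
 *		.committed = { .rate = 12500000,  .size = 4096 },
 *		.peak      = { .rate = 125000000, .size = 8192 },
 *	};
 *
 * i.e. a 100 Mbit/s CIR with a 4 KB burst and a 1 Gbit/s PIR with an
 * 8 KB burst, referenced from a node via params.shaper_profile_id.
 */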
260 send_tm_reqval(struct otx2_mbox *mbox, struct nix_txschq_config *req)
264 if (req->num_regs > MAX_REGS_PER_MBOX_MSG)
267 rc = otx2_mbox_process(mbox);
276 populate_tm_registers(struct otx2_eth_dev *dev,
277 struct otx2_nix_tm_node *tm_node)
279 uint64_t strict_schedul_prio, rr_prio;
280 struct otx2_mbox *mbox = dev->mbox;
281 volatile uint64_t *reg, *regval;
282 uint64_t parent = 0, child = 0;
283 struct shaper_params cir, pir;
284 struct nix_txschq_config *req;
290 memset(&cir, 0, sizeof(cir));
291 memset(&pir, 0, sizeof(pir));
293 /* Skip leaf nodes */
294 if (tm_node->hw_lvl_id == NIX_TXSCH_LVL_CNT)
297 /* Root node will not have a parent node */
298 if (tm_node->hw_lvl_id == dev->otx2_tm_root_lvl)
299 parent = tm_node->parent_hw_id;
301 parent = tm_node->parent->hw_id;
303 /* When the root is at TL2 (no TL1 access), program the default TL1 config here */
304 if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
305 tm_node->hw_lvl_id == dev->otx2_tm_root_lvl) {
308 * Default config for TL1.
309 * For VF this is always ignored.
312 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
313 req->lvl = NIX_TXSCH_LVL_TL1;
315 /* Set DWRR quantum */
316 req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
317 req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
320 req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
321 req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
324 req->reg[2] = NIX_AF_TL1X_CIR(schq);
328 rc = send_tm_reqval(mbox, req);
333 if (tm_node->hw_lvl_id != NIX_TXSCH_LVL_SMQ)
334 child = find_prio_anchor(dev, tm_node->id);
336 rr_prio = tm_node->rr_prio;
337 hw_lvl = tm_node->hw_lvl_id;
338 strict_schedul_prio = tm_node->priority;
339 schq = tm_node->hw_id;
340 rr_quantum = (tm_node->weight * NIX_TM_RR_QUANTUM_MAX) /
343 configure_shaper_cir_pir_reg(dev, tm_node, &cir, &pir);
345 otx2_tm_dbg("Configure node %p, lvl %u hw_lvl %u, id %u, hw_id %u,"
346 "parent_hw_id %" PRIx64 ", pir %" PRIx64 ", cir %" PRIx64,
347 tm_node, tm_node->level_id, hw_lvl,
348 tm_node->id, schq, parent, pir.rate, cir.rate);
353 case NIX_TXSCH_LVL_SMQ:
354 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
357 regval = req->regval;
360 /* Set xoff which will be cleared later */
361 *reg++ = NIX_AF_SMQX_CFG(schq);
362 *regval++ = BIT_ULL(50) | ((uint64_t)NIX_MAX_VTAG_INS << 36) |
363 (NIX_MAX_HW_FRS << 8) | NIX_MIN_HW_FRS;
365 *reg++ = NIX_AF_MDQX_PARENT(schq);
366 *regval++ = parent << 16;
368 *reg++ = NIX_AF_MDQX_SCHEDULE(schq);
369 *regval++ = (strict_schedul_prio << 24) | rr_quantum;
371 if (pir.rate && pir.burst) {
372 *reg++ = NIX_AF_MDQX_PIR(schq);
373 *regval++ = shaper2regval(&pir) | 1;
377 if (cir.rate && cir.burst) {
378 *reg++ = NIX_AF_MDQX_CIR(schq);
379 *regval++ = shaper2regval(&cir) | 1;
383 rc = send_tm_reqval(mbox, req);
387 case NIX_TXSCH_LVL_TL4:
388 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
392 regval = req->regval;
394 *reg++ = NIX_AF_TL4X_PARENT(schq);
395 *regval++ = parent << 16;
397 *reg++ = NIX_AF_TL4X_TOPOLOGY(schq);
398 *regval++ = (child << 32) | (rr_prio << 1);
400 *reg++ = NIX_AF_TL4X_SCHEDULE(schq);
401 *regval++ = (strict_schedul_prio << 24) | rr_quantum;
403 if (pir.rate && pir.burst) {
404 *reg++ = NIX_AF_TL4X_PIR(schq);
405 *regval++ = shaper2regval(&pir) | 1;
408 if (cir.rate && cir.burst) {
409 *reg++ = NIX_AF_TL4X_CIR(schq);
410 *regval++ = shaper2regval(&cir) | 1;
413 /* Configure TL4 to send to SDP channel instead of CGX/LBK */
414 if (otx2_dev_is_sdp(dev)) {
415 *reg++ = NIX_AF_TL4X_SDP_LINK_CFG(schq);
416 *regval++ = BIT_ULL(12);
420 rc = send_tm_reqval(mbox, req);
424 case NIX_TXSCH_LVL_TL3:
425 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
429 regval = req->regval;
431 *reg++ = NIX_AF_TL3X_PARENT(schq);
432 *regval++ = parent << 16;
434 *reg++ = NIX_AF_TL3X_TOPOLOGY(schq);
435 *regval++ = (child << 32) | (rr_prio << 1);
437 *reg++ = NIX_AF_TL3X_SCHEDULE(schq);
438 *regval++ = (strict_schedul_prio << 24) | rr_quantum;
440 if (pir.rate && pir.burst) {
441 *reg++ = NIX_AF_TL3X_PIR(schq);
442 *regval++ = shaper2regval(&pir) | 1;
445 if (cir.rate && cir.burst) {
446 *reg++ = NIX_AF_TL3X_CIR(schq);
447 *regval++ = shaper2regval(&cir) | 1;
451 rc = send_tm_reqval(mbox, req);
455 case NIX_TXSCH_LVL_TL2:
456 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
460 regval = req->regval;
462 *reg++ = NIX_AF_TL2X_PARENT(schq);
463 *regval++ = parent << 16;
465 *reg++ = NIX_AF_TL2X_TOPOLOGY(schq);
466 *regval++ = (child << 32) | (rr_prio << 1);
468 *reg++ = NIX_AF_TL2X_SCHEDULE(schq);
469 if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2)
470 *regval++ = (1 << 24) | rr_quantum;
472 *regval++ = (strict_schedul_prio << 24) | rr_quantum;
474 if (!otx2_dev_is_sdp(dev)) {
475 *reg++ = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
477 *regval++ = BIT_ULL(12) | nix_get_relchan(dev);
480 if (pir.rate && pir.burst) {
481 *reg++ = NIX_AF_TL2X_PIR(schq);
482 *regval++ = shaper2regval(&pir) | 1;
485 if (cir.rate && cir.burst) {
486 *reg++ = NIX_AF_TL2X_CIR(schq);
487 *regval++ = shaper2regval(&cir) | 1;
491 rc = send_tm_reqval(mbox, req);
495 case NIX_TXSCH_LVL_TL1:
496 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
500 regval = req->regval;
502 *reg++ = NIX_AF_TL1X_SCHEDULE(schq);
503 *regval++ = rr_quantum;
505 *reg++ = NIX_AF_TL1X_TOPOLOGY(schq);
506 *regval++ = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
508 if (cir.rate && cir.burst) {
509 *reg++ = NIX_AF_TL1X_CIR(schq);
510 *regval++ = shaper2regval(&cir) | 1;
514 rc = send_tm_reqval(mbox, req);
522 otx2_err("Txschq cfg request failed for node %p, rc=%d", tm_node, rc);
528 nix_tm_txsch_reg_config(struct otx2_eth_dev *dev)
530 struct otx2_nix_tm_node *tm_node;
534 for (lvl = 0; lvl < (uint32_t)dev->otx2_tm_root_lvl + 1; lvl++) {
535 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
536 if (tm_node->hw_lvl_id == lvl) {
537 rc = populate_tm_registers(dev, tm_node);
547 static struct otx2_nix_tm_node *
548 nix_tm_node_search(struct otx2_eth_dev *dev,
549 uint32_t node_id, bool user)
551 struct otx2_nix_tm_node *tm_node;
553 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
554 if (tm_node->id == node_id &&
555 (user == !!(tm_node->flags & NIX_TM_NODE_USER)))
562 check_rr(struct otx2_eth_dev *dev, uint32_t priority, uint32_t parent_id)
564 struct otx2_nix_tm_node *tm_node;
567 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
568 if (!tm_node->parent)
571 if (tm_node->parent->id != parent_id)
574 if (tm_node->priority == priority)
581 nix_tm_update_parent_info(struct otx2_eth_dev *dev)
583 struct otx2_nix_tm_node *tm_node_child;
584 struct otx2_nix_tm_node *tm_node;
585 struct otx2_nix_tm_node *parent;
589 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
590 if (!tm_node->parent)
592 /* Count the group of children with the same priority, i.e. the RR group */
593 parent = tm_node->parent;
594 priority = tm_node->priority;
595 rr_num = check_rr(dev, priority, parent->id);
597 /* Assuming that multiple RR groups are
598 * not configured based on capability.
601 parent->rr_prio = priority;
602 parent->rr_num = rr_num;
605 /* Find out static priority children that are not in RR */
606 TAILQ_FOREACH(tm_node_child, &dev->node_list, node) {
607 if (!tm_node_child->parent)
609 if (parent->id != tm_node_child->parent->id)
611 if (parent->max_prio == UINT32_MAX &&
612 tm_node_child->priority != parent->rr_prio)
613 parent->max_prio = 0;
615 if (parent->max_prio < tm_node_child->priority &&
616 parent->rr_prio != tm_node_child->priority)
617 parent->max_prio = tm_node_child->priority;
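/*
 * Worked example for the bookkeeping above: a parent whose children carry
 * priorities {0, 1, 1, 1, 2} has three children sharing priority 1, so
 * check_rr() reports 3 for that priority and the parent records
 * rr_prio = 1, rr_num = 3; the remaining strict-priority children then
 * leave max_prio = 2, the highest priority outside the RR group.
 */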
625 nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,
626 uint32_t parent_node_id, uint32_t priority,
627 uint32_t weight, uint16_t hw_lvl_id,
628 uint16_t level_id, bool user,
629 struct rte_tm_node_params *params)
631 struct otx2_nix_tm_shaper_profile *shaper_profile;
632 struct otx2_nix_tm_node *tm_node, *parent_node;
633 uint32_t shaper_profile_id;
635 shaper_profile_id = params->shaper_profile_id;
636 shaper_profile = nix_tm_shaper_profile_search(dev, shaper_profile_id);
638 parent_node = nix_tm_node_search(dev, parent_node_id, user);
640 tm_node = rte_zmalloc("otx2_nix_tm_node",
641 sizeof(struct otx2_nix_tm_node), 0);
645 tm_node->level_id = level_id;
646 tm_node->hw_lvl_id = hw_lvl_id;
648 tm_node->id = node_id;
649 tm_node->priority = priority;
650 tm_node->weight = weight;
651 tm_node->rr_prio = 0xf;
652 tm_node->max_prio = UINT32_MAX;
653 tm_node->hw_id = UINT32_MAX;
656 tm_node->flags = NIX_TM_NODE_USER;
657 rte_memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
660 shaper_profile->reference_count++;
661 tm_node->parent = parent_node;
662 tm_node->parent_hw_id = UINT32_MAX;
664 TAILQ_INSERT_TAIL(&dev->node_list, tm_node, node);
670 nix_tm_clear_shaper_profiles(struct otx2_eth_dev *dev)
672 struct otx2_nix_tm_shaper_profile *shaper_profile;
674 while ((shaper_profile = TAILQ_FIRST(&dev->shaper_profile_list))) {
675 if (shaper_profile->reference_count)
676 otx2_tm_dbg("Shaper profile %u has non zero references",
677 shaper_profile->shaper_profile_id);
678 TAILQ_REMOVE(&dev->shaper_profile_list, shaper_profile, shaper);
679 rte_free(shaper_profile);
686 nix_smq_xoff(struct otx2_eth_dev *dev, uint16_t smq, bool enable)
688 struct otx2_mbox *mbox = dev->mbox;
689 struct nix_txschq_config *req;
691 req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
692 req->lvl = NIX_TXSCH_LVL_SMQ;
695 req->reg[0] = NIX_AF_SMQX_CFG(smq);
696 /* Unmodified fields */
697 req->regval[0] = ((uint64_t)NIX_MAX_VTAG_INS << 36) |
698 (NIX_MAX_HW_FRS << 8) | NIX_MIN_HW_FRS;
701 req->regval[0] |= BIT_ULL(50) | BIT_ULL(49);
705 return otx2_mbox_process(mbox);
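/*
 * Usage sketch, mirroring otx2_nix_tm_sw_xoff() and nix_tm_sw_xon() below
 * (error handling omitted):
 *
 *	nix_smq_xoff(dev, smq, false);   let the SMQ pass traffic
 *	... transmit, or wait for the SQ to drain ...
 *	nix_smq_xoff(dev, smq, true);    stop and flush the SMQ
 *
 * When enable is true, both bit 49 and bit 50 of NIX_AF_SMQX_CFG are set;
 * bit 50 is the same bit populate_tm_registers() sets while the SMQ is
 * first configured and clears later.
 */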
709 otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable)
711 struct otx2_eth_txq *txq = __txq;
712 struct npa_aq_enq_req *req;
713 struct npa_aq_enq_rsp *rsp;
714 struct otx2_npa_lf *lf;
715 struct otx2_mbox *mbox;
716 uint64_t aura_handle;
719 lf = otx2_npa_lf_obj_get();
723 /* Set/clear sqb aura fc_ena */
724 aura_handle = txq->sqb_pool->pool_id;
725 req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
727 req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
728 req->ctype = NPA_AQ_CTYPE_AURA;
729 req->op = NPA_AQ_INSTOP_WRITE;
730 /* Below is not needed for aura writes but AF driver needs it */
731 /* AF will translate to associated poolctx */
732 req->aura.pool_addr = req->aura_id;
734 req->aura.fc_ena = enable;
735 req->aura_mask.fc_ena = 1;
737 rc = otx2_mbox_process(mbox);
741 /* Read back npa aura ctx */
742 req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
744 req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
745 req->ctype = NPA_AQ_CTYPE_AURA;
746 req->op = NPA_AQ_INSTOP_READ;
748 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
752 /* Initialize the count when enabling, as there may be no other trigger */
754 *(volatile uint64_t *)txq->fc_mem = rsp->aura.count;
756 *(volatile uint64_t *)txq->fc_mem = txq->nb_sqb_bufs;
757 /* Sync write barrier */
764 nix_txq_flush_sq_spin(struct otx2_eth_txq *txq)
766 uint16_t sqb_cnt, head_off, tail_off;
767 struct otx2_eth_dev *dev = txq->dev;
768 uint16_t sq = txq->sq;
773 reg = ((uint64_t)sq << 32);
774 regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);
775 val = otx2_atomic64_add_nosync(reg, regaddr);
777 regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS);
778 val = otx2_atomic64_add_nosync(reg, regaddr);
779 sqb_cnt = val & 0xFFFF;
780 head_off = (val >> 20) & 0x3F;
781 tail_off = (val >> 28) & 0x3F;
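/*
 * Worked decode of a NIX_LF_SQ_OP_STATUS value using the field positions
 * above: val = 0x50500001 gives sqb_cnt = 1, head_off = 5 and
 * tail_off = 5, which together with *fc_mem == nb_sqb_bufs satisfies the
 * quiescent-state check below.
 */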
783 /* SQ reached quiescent state */
784 if (sqb_cnt <= 1 && head_off == tail_off &&
785 (*txq->fc_mem == txq->nb_sqb_bufs)) {
794 otx2_nix_tm_sw_xoff(void *__txq, bool dev_started)
796 struct otx2_eth_txq *txq = __txq;
797 struct otx2_eth_dev *dev = txq->dev;
798 struct otx2_mbox *mbox = dev->mbox;
799 struct nix_aq_enq_req *req;
800 struct nix_aq_enq_rsp *rsp;
804 /* Get smq from sq */
805 req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
807 req->ctype = NIX_AQ_CTYPE_SQ;
808 req->op = NIX_AQ_INSTOP_READ;
809 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
811 otx2_err("Failed to get smq, rc=%d", rc);
815 /* Check if sq is enabled */
821 /* Enable CGX RXTX to drain pkts */
823 rc = otx2_cgx_rxtx_start(dev);
828 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
830 otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
834 /* Disable smq xoff in case it was enabled earlier */
835 rc = nix_smq_xoff(dev, smq, false);
837 otx2_err("Failed to enable smq for sq %u, rc=%d", txq->sq, rc);
841 /* Wait for sq entries to be flushed */
842 nix_txq_flush_sq_spin(txq);
844 /* Flush and enable smq xoff */
845 rc = nix_smq_xoff(dev, smq, true);
847 otx2_err("Failed to disable smq for sq %u, rc=%d", txq->sq, rc);
852 /* Restore cgx state */
854 rc |= otx2_cgx_rxtx_stop(dev);
860 nix_tm_sw_xon(struct otx2_eth_txq *txq,
861 uint16_t smq, uint32_t rr_quantum)
863 struct otx2_eth_dev *dev = txq->dev;
864 struct otx2_mbox *mbox = dev->mbox;
865 struct nix_aq_enq_req *req;
868 otx2_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum %u",
869 txq->sq, smq, rr_quantum);
870 /* Set smq from sq */
871 req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
873 req->ctype = NIX_AQ_CTYPE_SQ;
874 req->op = NIX_AQ_INSTOP_WRITE;
876 req->sq.smq_rr_quantum = rr_quantum;
877 req->sq_mask.smq = ~req->sq_mask.smq;
878 req->sq_mask.smq_rr_quantum = ~req->sq_mask.smq_rr_quantum;
880 rc = otx2_mbox_process(mbox);
882 otx2_err("Failed to set smq, rc=%d", rc);
886 /* Enable sqb_aura fc */
887 rc = otx2_nix_sq_sqb_aura_fc(txq, true);
889 otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
893 /* Disable smq xoff */
894 rc = nix_smq_xoff(dev, smq, false);
896 otx2_err("Failed to enable smq for sq %u", txq->sq);
904 nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask,
905 uint32_t flags, bool hw_only)
907 struct otx2_nix_tm_shaper_profile *shaper_profile;
908 struct otx2_nix_tm_node *tm_node, *next_node;
909 struct otx2_mbox *mbox = dev->mbox;
910 struct nix_txsch_free_req *req;
911 uint32_t shaper_profile_id;
912 bool skip_node = false;
915 next_node = TAILQ_FIRST(&dev->node_list);
918 next_node = TAILQ_NEXT(tm_node, node);
920 /* Check for only requested nodes */
921 if ((tm_node->flags & flags_mask) != flags)
924 if (nix_tm_have_tl1_access(dev) &&
925 tm_node->hw_lvl_id == NIX_TXSCH_LVL_TL1)
928 otx2_tm_dbg("Free hwres for node %u, hwlvl %u, hw_id %u (%p)",
929 tm_node->id, tm_node->hw_lvl_id,
930 tm_node->hw_id, tm_node);
931 /* Free specific HW resource if requested */
932 if (!skip_node && flags_mask &&
933 tm_node->flags & NIX_TM_NODE_HWRES) {
934 req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
936 req->schq_lvl = tm_node->hw_lvl_id;
937 req->schq = tm_node->hw_id;
938 rc = otx2_mbox_process(mbox);
944 tm_node->flags &= ~NIX_TM_NODE_HWRES;
946 /* Leave software elements if needed */
950 shaper_profile_id = tm_node->params.shaper_profile_id;
952 nix_tm_shaper_profile_search(dev, shaper_profile_id);
954 shaper_profile->reference_count--;
956 TAILQ_REMOVE(&dev->node_list, tm_node, node);
961 /* Free all hw resources */
962 req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
963 req->flags = TXSCHQ_FREE_ALL;
965 return otx2_mbox_process(mbox);
972 nix_tm_copy_rsp_to_dev(struct otx2_eth_dev *dev,
973 struct nix_txsch_alloc_rsp *rsp)
978 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
979 for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) {
980 dev->txschq_list[lvl][schq] = rsp->schq_list[lvl][schq];
981 dev->txschq_contig_list[lvl][schq] =
982 rsp->schq_contig_list[lvl][schq];
985 dev->txschq[lvl] = rsp->schq[lvl];
986 dev->txschq_contig[lvl] = rsp->schq_contig[lvl];
992 nix_tm_assign_id_to_node(struct otx2_eth_dev *dev,
993 struct otx2_nix_tm_node *child,
994 struct otx2_nix_tm_node *parent)
996 uint32_t hw_id, schq_con_index, prio_offset;
997 uint32_t l_id, schq_index;
999 otx2_tm_dbg("Assign hw id for child node %u, lvl %u, hw_lvl %u (%p)",
1000 child->id, child->level_id, child->hw_lvl_id, child);
1002 child->flags |= NIX_TM_NODE_HWRES;
1004 /* Process root nodes */
1005 if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
1006 child->hw_lvl_id == dev->otx2_tm_root_lvl && !parent) {
1008 uint32_t tschq_con_index;
1010 l_id = child->hw_lvl_id;
1011 tschq_con_index = dev->txschq_contig_index[l_id];
1012 hw_id = dev->txschq_contig_list[l_id][tschq_con_index];
1013 child->hw_id = hw_id;
1014 dev->txschq_contig_index[l_id]++;
1015 /* Update TL1 hw_id for its parent for config purpose */
1016 idx = dev->txschq_index[NIX_TXSCH_LVL_TL1]++;
1017 hw_id = dev->txschq_list[NIX_TXSCH_LVL_TL1][idx];
1018 child->parent_hw_id = hw_id;
1021 if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL1 &&
1022 child->hw_lvl_id == dev->otx2_tm_root_lvl && !parent) {
1023 uint32_t tschq_con_index;
1025 l_id = child->hw_lvl_id;
1026 tschq_con_index = dev->txschq_index[l_id];
1027 hw_id = dev->txschq_list[l_id][tschq_con_index];
1028 child->hw_id = hw_id;
1029 dev->txschq_index[l_id]++;
1033 /* Process children with parents */
1034 l_id = child->hw_lvl_id;
1035 schq_index = dev->txschq_index[l_id];
1036 schq_con_index = dev->txschq_contig_index[l_id];
1038 if (child->priority == parent->rr_prio) {
1039 hw_id = dev->txschq_list[l_id][schq_index];
1040 child->hw_id = hw_id;
1041 child->parent_hw_id = parent->hw_id;
1042 dev->txschq_index[l_id]++;
1044 prio_offset = schq_con_index + child->priority;
1045 hw_id = dev->txschq_contig_list[l_id][prio_offset];
1046 child->hw_id = hw_id;
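/*
 * Example of the two assignment paths above: round-robin children
 * (priority == parent->rr_prio) consume hardware ids sequentially from
 * txschq_list[], while strict-priority children index the contiguous
 * txschq_contig_list[] block at (base index + priority).  With a
 * contiguous base index of 8, the priority-2 child of a parent would get
 * txschq_contig_list[lvl][10].
 */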
1052 nix_tm_assign_hw_id(struct otx2_eth_dev *dev)
1054 struct otx2_nix_tm_node *parent, *child;
1055 uint32_t child_hw_lvl, con_index_inc, i;
1057 for (i = NIX_TXSCH_LVL_TL1; i > 0; i--) {
1058 TAILQ_FOREACH(parent, &dev->node_list, node) {
1059 child_hw_lvl = parent->hw_lvl_id - 1;
1060 if (parent->hw_lvl_id != i)
1062 TAILQ_FOREACH(child, &dev->node_list, node) {
1065 if (child->parent->id != parent->id)
1067 nix_tm_assign_id_to_node(dev, child, parent);
1070 con_index_inc = parent->max_prio + 1;
1071 dev->txschq_contig_index[child_hw_lvl] += con_index_inc;
1074 * Explicitly assign id to parent node if it
1075 * doesn't have a parent
1077 if (parent->hw_lvl_id == dev->otx2_tm_root_lvl)
1078 nix_tm_assign_id_to_node(dev, parent, NULL);
1085 nix_tm_count_req_schq(struct otx2_eth_dev *dev,
1086 struct nix_txsch_alloc_req *req, uint8_t lvl)
1088 struct otx2_nix_tm_node *tm_node;
1089 uint8_t contig_count;
1091 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1092 if (lvl == tm_node->hw_lvl_id) {
1093 req->schq[lvl - 1] += tm_node->rr_num;
1094 if (tm_node->max_prio != UINT32_MAX) {
1095 contig_count = tm_node->max_prio + 1;
1096 req->schq_contig[lvl - 1] += contig_count;
1099 if (lvl == dev->otx2_tm_root_lvl &&
1100 dev->otx2_tm_root_lvl && lvl == NIX_TXSCH_LVL_TL2 &&
1101 tm_node->hw_lvl_id == dev->otx2_tm_root_lvl) {
1102 req->schq_contig[dev->otx2_tm_root_lvl]++;
1106 req->schq[NIX_TXSCH_LVL_TL1] = 1;
1107 req->schq_contig[NIX_TXSCH_LVL_TL1] = 0;
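/*
 * Counting example for the loop above: a TL3 node with rr_num = 3 and
 * max_prio = 2 requests 3 non-contiguous and 3 contiguous schqs at the
 * level below it (its TL4 children); the TL1 request is then pinned to a
 * single non-contiguous schq.
 */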
1113 nix_tm_prepare_txschq_req(struct otx2_eth_dev *dev,
1114 struct nix_txsch_alloc_req *req)
1118 for (i = NIX_TXSCH_LVL_TL1; i > 0; i--)
1119 nix_tm_count_req_schq(dev, req, i);
1121 for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
1122 dev->txschq_index[i] = 0;
1123 dev->txschq_contig_index[i] = 0;
1129 nix_tm_send_txsch_alloc_msg(struct otx2_eth_dev *dev)
1131 struct otx2_mbox *mbox = dev->mbox;
1132 struct nix_txsch_alloc_req *req;
1133 struct nix_txsch_alloc_rsp *rsp;
1136 req = otx2_mbox_alloc_msg_nix_txsch_alloc(mbox);
1138 rc = nix_tm_prepare_txschq_req(dev, req);
1142 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1146 nix_tm_copy_rsp_to_dev(dev, rsp);
1148 nix_tm_assign_hw_id(dev);
1153 nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)
1155 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1156 struct otx2_nix_tm_node *tm_node;
1157 uint16_t sq, smq, rr_quantum;
1158 struct otx2_eth_txq *txq;
1161 nix_tm_update_parent_info(dev);
1163 rc = nix_tm_send_txsch_alloc_msg(dev);
1165 otx2_err("TM failed to alloc tm resources=%d", rc);
1169 rc = nix_tm_txsch_reg_config(dev);
1171 otx2_err("TM failed to configure sched registers=%d", rc);
1175 /* Enable xmit as all the topology is ready */
1176 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1177 if (tm_node->flags & NIX_TM_NODE_ENABLED)
1180 /* Enable xmit on sq */
1181 if (tm_node->level_id != OTX2_TM_LVL_QUEUE) {
1182 tm_node->flags |= NIX_TM_NODE_ENABLED;
1186 /* Don't enable the SMQ or mark the node as enabled */
1191 if (sq >= eth_dev->data->nb_tx_queues) {
1196 txq = eth_dev->data->tx_queues[sq];
1198 smq = tm_node->parent->hw_id;
1199 rr_quantum = (tm_node->weight *
1200 NIX_TM_RR_QUANTUM_MAX) / MAX_SCHED_WEIGHT;
1202 rc = nix_tm_sw_xon(txq, smq, rr_quantum);
1205 tm_node->flags |= NIX_TM_NODE_ENABLED;
1209 otx2_err("TM failed to enable xmit on sq %u, rc=%d", sq, rc);
1215 nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)
1217 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1218 uint32_t def = eth_dev->data->nb_tx_queues;
1219 struct rte_tm_node_params params;
1220 uint32_t leaf_parent, i;
1223 /* Default params */
1224 memset(&params, 0, sizeof(params));
1225 params.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
1227 if (nix_tm_have_tl1_access(dev)) {
1228 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
1229 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
1232 OTX2_TM_LVL_ROOT, false, &params);
1235 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
1238 OTX2_TM_LVL_SCH1, false, &params);
1242 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
1245 OTX2_TM_LVL_SCH2, false, &params);
1249 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
1252 OTX2_TM_LVL_SCH3, false, &params);
1256 rc = nix_tm_node_add_to_list(dev, def + 4, def + 3, 0,
1259 OTX2_TM_LVL_SCH4, false, &params);
1263 leaf_parent = def + 4;
1265 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
1266 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
1269 OTX2_TM_LVL_ROOT, false, &params);
1273 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
1276 OTX2_TM_LVL_SCH1, false, &params);
1280 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
1283 OTX2_TM_LVL_SCH2, false, &params);
1287 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
1290 OTX2_TM_LVL_SCH3, false, &params);
1294 leaf_parent = def + 3;
1297 /* Add leaf nodes */
1298 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1299 rc = nix_tm_node_add_to_list(dev, i, leaf_parent, 0,
1302 OTX2_TM_LVL_QUEUE, false, &params);
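/*
 * Shape of the default tree built above, with N = nb_tx_queues.  The root
 * sits at TL1 when nix_tm_have_tl1_access() is true (five internal
 * levels), else at TL2 (four):
 *
 *	root (id N) -> SCH1 (N + 1) -> SCH2 (N + 2) -> SCH3 (N + 3)
 *	  [-> SCH4 (N + 4) with TL1 access] -> leaf nodes 0 .. N - 1
 *
 * Each leaf/queue node maps one SQ onto the SMQ owned by its parent.
 */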
1311 void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev)
1313 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1315 TAILQ_INIT(&dev->node_list);
1316 TAILQ_INIT(&dev->shaper_profile_list);
1319 int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
1321 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1322 uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
1325 /* Free up all resources already held */
1326 rc = nix_tm_free_resources(dev, 0, 0, false);
1328 otx2_err("Failed to freeup existing resources,rc=%d", rc);
1332 /* Clear shaper profiles */
1333 nix_tm_clear_shaper_profiles(dev);
1334 dev->tm_flags = NIX_TM_DEFAULT_TREE;
1336 rc = nix_tm_prepare_default_tree(eth_dev);
1340 rc = nix_tm_alloc_resources(eth_dev, false);
1343 dev->tm_leaf_cnt = sq_cnt;
1349 otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
1351 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1354 /* Xmit is assumed to be disabled */
1355 /* Free up resources already held */
1356 rc = nix_tm_free_resources(dev, 0, 0, false);
1358 otx2_err("Failed to freeup existing resources,rc=%d", rc);
1362 /* Clear shaper profiles */
1363 nix_tm_clear_shaper_profiles(dev);
1370 otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
1371 uint32_t *rr_quantum, uint16_t *smq)
1373 struct otx2_nix_tm_node *tm_node;
1376 /* 0..sq_cnt-1 are leaf nodes */
1377 if (sq >= dev->tm_leaf_cnt)
1380 /* Search for internal node first */
1381 tm_node = nix_tm_node_search(dev, sq, false);
1383 tm_node = nix_tm_node_search(dev, sq, true);
1385 /* Check if we found a valid leaf node */
1386 if (!tm_node || tm_node->level_id != OTX2_TM_LVL_QUEUE ||
1387 !tm_node->parent || tm_node->parent->hw_id == UINT32_MAX) {
1391 /* Get SMQ Id of leaf node's parent */
1392 *smq = tm_node->parent->hw_id;
1393 *rr_quantum = (tm_node->weight * NIX_TM_RR_QUANTUM_MAX)
1396 rc = nix_smq_xoff(dev, *smq, false);
1399 tm_node->flags |= NIX_TM_NODE_ENABLED;
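/*
 * Note on the weight mapping used here and in nix_tm_alloc_resources():
 * rr_quantum scales linearly as weight * NIX_TM_RR_QUANTUM_MAX /
 * MAX_SCHED_WEIGHT, so a node configured with half of MAX_SCHED_WEIGHT is
 * programmed with half of the maximum DWRR quantum.
 */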