net/octeontx2: update red algo for shaper dynamic update
drivers/net/octeontx2/otx2_tm.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_malloc.h>

#include "otx2_ethdev.h"
#include "otx2_tm.h"

/* Use last LVL_CNT nodes as default nodes */
#define NIX_DEFAULT_NODE_ID_START (RTE_TM_NODE_ID_NULL - NIX_TXSCH_LVL_CNT)

enum otx2_tm_node_level {
	OTX2_TM_LVL_ROOT = 0,
	OTX2_TM_LVL_SCH1,
	OTX2_TM_LVL_SCH2,
	OTX2_TM_LVL_SCH3,
	OTX2_TM_LVL_SCH4,
	OTX2_TM_LVL_QUEUE,
	OTX2_TM_LVL_MAX,
};

static inline
uint64_t shaper2regval(struct shaper_params *shaper)
{
	return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
		(shaper->div_exp << 13) | (shaper->exponent << 9) |
		(shaper->mantissa << 1);
}
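/*
 * The packing above mirrors the NIX_AF_*_{PIR,CIR} shaper register layout
 * used throughout this file. A sketch of the fields, with widths inferred
 * from the shifts above and the MAX_* limits in otx2_tm.h:
 *
 *   [37..]  burst_exponent    [29..36] burst_mantissa
 *   [13..16] div_exp          [9..12]  exponent
 *   [1..8]  mantissa          [0]      enable (OR'd in by callers as "| 1")
 */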

int
otx2_nix_get_link(struct otx2_eth_dev *dev)
{
	int link = 13 /* SDP */;
	uint16_t lmac_chan;
	uint16_t map;

	lmac_chan = dev->tx_chan_base;

	/* CGX lmac link */
	if (lmac_chan >= 0x800) {
		map = lmac_chan & 0x7FF;
		link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
	} else if (lmac_chan < 0x700) {
		/* LBK channel */
		link = 12;
	}

	return link;
}
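/*
 * Worked example for the mapping above (illustrative only): a CGX channel
 * base of 0x810 gives map = 0x010, i.e. CGX 0 (bits [8:11]) and LMAC 1
 * (bits [4:7]), so link = 4 * 0 + 1 = 1. Channels below 0x700 map to the
 * loopback (LBK) link 12, and anything in between falls back to SDP
 * link 13.
 */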

static uint8_t
nix_get_relchan(struct otx2_eth_dev *dev)
{
	return dev->tx_chan_base & 0xff;
}

static bool
nix_tm_have_tl1_access(struct otx2_eth_dev *dev)
{
	bool is_lbk = otx2_dev_is_lbk(dev);

	return otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) && !is_lbk;
}

static bool
nix_tm_is_leaf(struct otx2_eth_dev *dev, int lvl)
{
	if (nix_tm_have_tl1_access(dev))
		return (lvl == OTX2_TM_LVL_QUEUE);

	return (lvl == OTX2_TM_LVL_SCH4);
}

static int
find_prio_anchor(struct otx2_eth_dev *dev, uint32_t node_id)
{
	struct otx2_nix_tm_node *child_node;

	TAILQ_FOREACH(child_node, &dev->node_list, node) {
		if (!child_node->parent)
			continue;
		if (child_node->parent->id != node_id)
			continue;
		if (child_node->priority == child_node->parent->rr_prio)
			continue;
		return child_node->hw_id - child_node->priority;
	}
	return 0;
}

static struct otx2_nix_tm_shaper_profile *
nix_tm_shaper_profile_search(struct otx2_eth_dev *dev, uint32_t shaper_id)
{
	struct otx2_nix_tm_shaper_profile *tm_shaper_profile;

	TAILQ_FOREACH(tm_shaper_profile, &dev->shaper_profile_list, shaper) {
		if (tm_shaper_profile->shaper_profile_id == shaper_id)
			return tm_shaper_profile;
	}
	return NULL;
}

static inline uint64_t
shaper_rate_to_nix(uint64_t value, uint64_t *exponent_p,
		   uint64_t *mantissa_p, uint64_t *div_exp_p)
{
	uint64_t div_exp, exponent, mantissa;

	/* Boundary checks */
	if (value < MIN_SHAPER_RATE ||
	    value > MAX_SHAPER_RATE)
		return 0;

	if (value <= SHAPER_RATE(0, 0, 0)) {
		/* Calculate rate div_exp and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * (256 + mantissa)
		 *		/ ((1 << div_exp) * 256))
		 */
		div_exp = 0;
		exponent = 0;
		mantissa = MAX_RATE_MANTISSA;

		while (value < (NIX_SHAPER_RATE_CONST / (1 << div_exp)))
			div_exp += 1;

		while (value <
		       ((NIX_SHAPER_RATE_CONST * (256 + mantissa)) /
			((1 << div_exp) * 256)))
			mantissa -= 1;
	} else {
		/* Calculate rate exponent and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
		 */
		div_exp = 0;
		exponent = MAX_RATE_EXPONENT;
		mantissa = MAX_RATE_MANTISSA;

		while (value < (NIX_SHAPER_RATE_CONST * (1 << exponent)))
			exponent -= 1;

		while (value < ((NIX_SHAPER_RATE_CONST *
				((256 + mantissa) << exponent)) / 256))
			mantissa -= 1;
	}

	if (div_exp > MAX_RATE_DIV_EXP ||
	    exponent > MAX_RATE_EXPONENT || mantissa > MAX_RATE_MANTISSA)
		return 0;

	if (div_exp_p)
		*div_exp_p = div_exp;
	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	/* Calculate real rate value */
	return SHAPER_RATE(exponent, mantissa, div_exp);
}
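/*
 * Worked example (illustrative, taking NIX_SHAPER_RATE_CONST as the 2E6
 * constant named in the formulas above): a requested rate of 1E6 falls
 * into the low branch; the first loop settles on div_exp = 1 (since
 * 2E6 / 2 is no longer above 1E6) and the second loop walks mantissa down
 * to 0, giving the exact rate 2E6 * (256 + 0) / ((1 << 1) * 256) = 1E6.
 * The caller receives the realized rate, which may differ slightly from
 * the request when the request is not exactly representable.
 */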

static inline uint64_t
shaper_burst_to_nix(uint64_t value, uint64_t *exponent_p,
		    uint64_t *mantissa_p)
{
	uint64_t exponent, mantissa;

	if (value < MIN_SHAPER_BURST || value > MAX_SHAPER_BURST)
		return 0;

	/* Calculate burst exponent and mantissa using
	 * the following formula:
	 *
	 * value = ((256 + mantissa) << (exponent + 1)) / 256
	 */
	exponent = MAX_BURST_EXPONENT;
	mantissa = MAX_BURST_MANTISSA;

	while (value < (1ull << (exponent + 1)))
		exponent -= 1;

	while (value < ((256 + mantissa) << (exponent + 1)) / 256)
		mantissa -= 1;

	if (exponent > MAX_BURST_EXPONENT || mantissa > MAX_BURST_MANTISSA)
		return 0;

	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	return SHAPER_BURST(exponent, mantissa);
}
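/*
 * Example (illustrative): a burst of 4096 bytes gives exponent = 11
 * (1 << 12 == 4096) and mantissa = 0, since (256 << 12) / 256 == 4096 is
 * already exact. A non-power-of-two burst lands between two exact values;
 * the decrementing loops stop at the largest representable value that
 * does not exceed the request.
 */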

static void
shaper_config_to_nix(struct otx2_nix_tm_shaper_profile *profile,
		     struct shaper_params *cir,
		     struct shaper_params *pir)
{
	struct rte_tm_shaper_params *param;

	/* Check for NULL before dereferencing the profile */
	if (!profile)
		return;
	param = &profile->params;

	/* Calculate CIR exponent and mantissa */
	if (param->committed.rate)
		cir->rate = shaper_rate_to_nix(param->committed.rate,
					       &cir->exponent,
					       &cir->mantissa,
					       &cir->div_exp);

	/* Calculate PIR exponent and mantissa */
	if (param->peak.rate)
		pir->rate = shaper_rate_to_nix(param->peak.rate,
					       &pir->exponent,
					       &pir->mantissa,
					       &pir->div_exp);

	/* Calculate CIR burst exponent and mantissa */
	if (param->committed.size)
		cir->burst = shaper_burst_to_nix(param->committed.size,
						 &cir->burst_exponent,
						 &cir->burst_mantissa);

	/* Calculate PIR burst exponent and mantissa */
	if (param->peak.size)
		pir->burst = shaper_burst_to_nix(param->peak.size,
						 &pir->burst_exponent,
						 &pir->burst_mantissa);
}

static void
shaper_default_red_algo(struct otx2_eth_dev *dev,
			struct otx2_nix_tm_node *tm_node,
			struct otx2_nix_tm_shaper_profile *profile)
{
	struct shaper_params cir, pir;

	/* C0 doesn't support STALL when both PIR & CIR are enabled */
	if (profile && otx2_dev_is_96xx_Cx(dev)) {
		memset(&cir, 0, sizeof(cir));
		memset(&pir, 0, sizeof(pir));
		shaper_config_to_nix(profile, &cir, &pir);

		if (pir.rate && cir.rate) {
			tm_node->red_algo = NIX_REDALG_DISCARD;
			tm_node->flags |= NIX_TM_NODE_RED_DISCARD;
			return;
		}
	}

	tm_node->red_algo = NIX_REDALG_STD;
	tm_node->flags &= ~NIX_TM_NODE_RED_DISCARD;
}

static int
populate_tm_tl1_default(struct otx2_eth_dev *dev, uint32_t schq)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_txschq_config *req;

	/*
	 * Default config for TL1.
	 * For VF this is always ignored.
	 */

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_TL1;

	/* Set DWRR quantum */
	req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
	req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
	req->num_regs++;

	req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
	req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
	req->num_regs++;

	req->reg[2] = NIX_AF_TL1X_CIR(schq);
	req->regval[2] = 0;
	req->num_regs++;

	return otx2_mbox_process(mbox);
}

static uint8_t
prepare_tm_sched_reg(struct otx2_eth_dev *dev,
		     struct otx2_nix_tm_node *tm_node,
		     volatile uint64_t *reg, volatile uint64_t *regval)
{
	uint64_t strict_prio = tm_node->priority;
	uint32_t hw_lvl = tm_node->hw_lvl;
	uint32_t schq = tm_node->hw_id;
	uint64_t rr_quantum;
	uint8_t k = 0;

	rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);

	/* For children of the root, strict priority is the default if
	 * either the device root is TL2 or TL1 static priority is disabled.
	 */
	if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
	    (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
	     dev->tm_flags & NIX_TM_TL1_NO_SP))
		strict_prio = TXSCH_TL1_DFLT_RR_PRIO;

	otx2_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
		    "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
		    nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
		    tm_node->id, strict_prio, rr_quantum, tm_node);

	switch (hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;
		break;
	case NIX_TXSCH_LVL_TL3:
		reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;
		break;
	case NIX_TXSCH_LVL_TL2:
		reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;
		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
		regval[k] = rr_quantum;
		k++;
		break;
	}

	return k;
}
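/*
 * Layout note for the SCHEDULE registers written above, derived from the
 * code itself: at SMQ/MDQ through TL2 the value is (prio << 24) |
 * rr_quantum, e.g. a node with priority 2 and a weight-derived quantum of
 * 0x100 programs 0x2000100. TL1 has no strict-priority field here, so
 * only the RR quantum is written.
 */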

static uint8_t
prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
		      struct otx2_nix_tm_shaper_profile *profile,
		      volatile uint64_t *reg, volatile uint64_t *regval)
{
	struct shaper_params cir, pir;
	uint32_t schq = tm_node->hw_id;
	uint8_t k = 0;

	memset(&cir, 0, sizeof(cir));
	memset(&pir, 0, sizeof(pir));
	shaper_config_to_nix(profile, &cir, &pir);

	otx2_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
		    "pir %" PRIu64 "(%" PRIu64 "B), "
		    "cir %" PRIu64 "(%" PRIu64 "B) (%p)",
		    nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
		    tm_node->id, pir.rate, pir.burst,
		    cir.rate, cir.burst, tm_node);

	switch (tm_node->hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_MDQX_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				(shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_MDQX_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				(shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_MDQX_SHAPE(schq);
		regval[k] = ((uint64_t)tm_node->red_algo << 9);
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL4X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				(shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_TL4X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				(shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL4X_SHAPE(schq);
		regval[k] = ((uint64_t)tm_node->red_algo << 9);
		k++;
		break;
	case NIX_TXSCH_LVL_TL3:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL3X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				(shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_TL3X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				(shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL3X_SHAPE(schq);
		regval[k] = ((uint64_t)tm_node->red_algo << 9);
		k++;
		break;
	case NIX_TXSCH_LVL_TL2:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL2X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				(shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_TL2X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				(shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL2X_SHAPE(schq);
		regval[k] = ((uint64_t)tm_node->red_algo << 9);
		k++;
		break;
	case NIX_TXSCH_LVL_TL1:
		/* Configure CIR */
		reg[k] = NIX_AF_TL1X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				(shaper2regval(&cir) | 1) : 0;
		k++;
		break;
	}

	return k;
}

static uint8_t
prepare_tm_sw_xoff(struct otx2_nix_tm_node *tm_node, bool enable,
		   volatile uint64_t *reg, volatile uint64_t *regval)
{
	uint32_t hw_lvl = tm_node->hw_lvl;
	uint32_t schq = tm_node->hw_id;
	uint8_t k = 0;

	otx2_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)",
		    nix_hwlvl2str(hw_lvl), schq, tm_node->lvl,
		    tm_node->id, enable, tm_node);

	regval[k] = enable;

	switch (hw_lvl) {
	case NIX_TXSCH_LVL_MDQ:
		reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL3:
		reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL2:
		reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
		k++;
		break;
	default:
		break;
	}

	return k;
}

static int
populate_tm_reg(struct otx2_eth_dev *dev,
		struct otx2_nix_tm_node *tm_node)
{
	struct otx2_nix_tm_shaper_profile *profile;
	uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
	uint64_t regval[MAX_REGS_PER_MBOX_MSG];
	uint64_t reg[MAX_REGS_PER_MBOX_MSG];
	struct otx2_mbox *mbox = dev->mbox;
	uint64_t parent = 0, child = 0;
	uint32_t hw_lvl, rr_prio, schq;
	struct nix_txschq_config *req;
	int rc = -EFAULT;
	uint8_t k = 0;

	memset(regval_mask, 0, sizeof(regval_mask));
	profile = nix_tm_shaper_profile_search(dev,
					tm_node->params.shaper_profile_id);
	rr_prio = tm_node->rr_prio;
	hw_lvl = tm_node->hw_lvl;
	schq = tm_node->hw_id;

	/* Root node will not have a parent node */
	if (hw_lvl == dev->otx2_tm_root_lvl)
		parent = tm_node->parent_hw_id;
	else
		parent = tm_node->parent->hw_id;

	/* When the root is TL2, its TL1 parent is not user-managed and
	 * needs the default TL1 config.
	 */
	if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
	    hw_lvl == dev->otx2_tm_root_lvl) {
		rc = populate_tm_tl1_default(dev, parent);
		if (rc)
			goto error;
	}

	if (hw_lvl != NIX_TXSCH_LVL_SMQ)
		child = find_prio_anchor(dev, tm_node->id);

	/* Override default rr_prio when TL1
	 * static priority is disabled
	 */
	if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
	    dev->tm_flags & NIX_TM_TL1_NO_SP) {
		rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
		child = 0;
	}

	otx2_tm_dbg("Topology config node %s(%u)->%s(%"PRIu64") lvl %u, id %u"
		    " prio_anchor %"PRIu64" rr_prio %u (%p)",
		    nix_hwlvl2str(hw_lvl), schq, nix_hwlvl2str(hw_lvl + 1),
		    parent, tm_node->lvl, tm_node->id, child, rr_prio, tm_node);

	/* Prepare Topology and Link config */
	switch (hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		/* Set xoff which will be cleared later */
		reg[k] = NIX_AF_SMQX_CFG(schq);
		regval[k] = BIT_ULL(50);
		regval_mask[k] = ~BIT_ULL(50);
		k++;

		/* Parent and schedule conf */
		reg[k] = NIX_AF_MDQX_PARENT(schq);
		regval[k] = parent << 16;
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL4X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Configure TL4 to send to SDP channel instead of CGX/LBK */
		if (otx2_dev_is_sdp(dev)) {
			reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
			regval[k] = BIT_ULL(12);
			k++;
		}
		break;
	case NIX_TXSCH_LVL_TL3:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL3X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Link configuration */
		if (!otx2_dev_is_sdp(dev) &&
		    dev->link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
						otx2_nix_get_link(dev));
			regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
			k++;
		}
		break;
	case NIX_TXSCH_LVL_TL2:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL2X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Link configuration */
		if (!otx2_dev_is_sdp(dev) &&
		    dev->link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
						otx2_nix_get_link(dev));
			regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
			k++;
		}
		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
		k++;
		break;
	}

	/* Prepare schedule config */
	k += prepare_tm_sched_reg(dev, tm_node, &reg[k], &regval[k]);

	/* Prepare shaping config */
	k += prepare_tm_shaper_reg(tm_node, profile, &reg[k], &regval[k]);

	if (!k)
		return 0;

	/* Copy and send config mbox */
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = hw_lvl;
	req->num_regs = k;

	otx2_mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
	otx2_mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
	otx2_mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);

	rc = otx2_mbox_process(mbox);
	if (rc)
		goto error;

	return 0;
error:
	otx2_err("Txschq cfg request failed for node %p, rc=%d", tm_node, rc);
	return rc;
}

static int
nix_tm_txsch_reg_config(struct otx2_eth_dev *dev)
{
	struct otx2_nix_tm_node *tm_node;
	uint32_t hw_lvl;
	int rc = 0;

	for (hw_lvl = 0; hw_lvl <= dev->otx2_tm_root_lvl; hw_lvl++) {
		TAILQ_FOREACH(tm_node, &dev->node_list, node) {
			if (tm_node->hw_lvl == hw_lvl &&
			    tm_node->hw_lvl != NIX_TXSCH_LVL_CNT) {
				rc = populate_tm_reg(dev, tm_node);
				if (rc)
					goto exit;
			}
		}
	}
exit:
	return rc;
}

static struct otx2_nix_tm_node *
nix_tm_node_search(struct otx2_eth_dev *dev,
		   uint32_t node_id, bool user)
{
	struct otx2_nix_tm_node *tm_node;

	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
		if (tm_node->id == node_id &&
		    (user == !!(tm_node->flags & NIX_TM_NODE_USER)))
			return tm_node;
	}
	return NULL;
}

static uint32_t
check_rr(struct otx2_eth_dev *dev, uint32_t priority, uint32_t parent_id)
{
	struct otx2_nix_tm_node *tm_node;
	uint32_t rr_num = 0;

	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
		if (!tm_node->parent)
			continue;

		if (tm_node->parent->id != parent_id)
			continue;

		if (tm_node->priority == priority)
			rr_num++;
	}
	return rr_num;
}
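/*
 * Example for check_rr() (illustrative): with four children of one parent
 * at priorities {0, 1, 1, 2}, check_rr(dev, 1, parent_id) returns 2 --
 * the two priority-1 siblings form the round-robin group, while the
 * priority-0 and priority-2 children remain strict-priority.
 */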

static int
nix_tm_update_parent_info(struct otx2_eth_dev *dev)
{
	struct otx2_nix_tm_node *tm_node_child;
	struct otx2_nix_tm_node *tm_node;
	struct otx2_nix_tm_node *parent;
	uint32_t rr_num = 0;
	uint32_t priority;

	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
		if (!tm_node->parent)
			continue;
		/* Count the group of children sharing this priority,
		 * i.e. the round-robin (RR) group.
		 */
		parent = tm_node->parent;
		priority = tm_node->priority;
		rr_num = check_rr(dev, priority, parent->id);

		/* Assuming that multiple RR groups are
		 * not configured based on capability.
		 */
		if (rr_num > 1) {
			parent->rr_prio = priority;
			parent->rr_num = rr_num;
		}

		/* Find out static priority children that are not in RR */
		TAILQ_FOREACH(tm_node_child, &dev->node_list, node) {
			if (!tm_node_child->parent)
				continue;
			if (parent->id != tm_node_child->parent->id)
				continue;
			if (parent->max_prio == UINT32_MAX &&
			    tm_node_child->priority != parent->rr_prio)
				parent->max_prio = 0;

			if (parent->max_prio < tm_node_child->priority &&
			    parent->rr_prio != tm_node_child->priority)
				parent->max_prio = tm_node_child->priority;
		}
	}

	return 0;
}
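/*
 * Continuing the example above: after nix_tm_update_parent_info() the
 * parent of the {0, 1, 1, 2} children carries rr_prio = 1, rr_num = 2 and
 * max_prio = 2, which later sizes the contiguous (strict-priority) and
 * non-contiguous (RR) scheduler queue requests in nix_tm_count_req_schq().
 */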

static int
nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,
			uint32_t parent_node_id, uint32_t priority,
			uint32_t weight, uint16_t hw_lvl,
			uint16_t lvl, bool user,
			struct rte_tm_node_params *params)
{
	struct otx2_nix_tm_shaper_profile *profile;
	struct otx2_nix_tm_node *tm_node, *parent_node;
	uint32_t profile_id;

	profile_id = params->shaper_profile_id;
	profile = nix_tm_shaper_profile_search(dev, profile_id);

	parent_node = nix_tm_node_search(dev, parent_node_id, user);

	tm_node = rte_zmalloc("otx2_nix_tm_node",
			      sizeof(struct otx2_nix_tm_node), 0);
	if (!tm_node)
		return -ENOMEM;

	tm_node->lvl = lvl;
	tm_node->hw_lvl = hw_lvl;

	/* Maintain minimum weight */
	if (!weight)
		weight = 1;

	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->rr_prio = 0xf;
	tm_node->max_prio = UINT32_MAX;
	tm_node->hw_id = UINT32_MAX;
	tm_node->flags = 0;
	if (user)
		tm_node->flags = NIX_TM_NODE_USER;
	rte_memcpy(&tm_node->params, params,
		   sizeof(struct rte_tm_node_params));

	if (profile)
		profile->reference_count++;

	tm_node->parent = parent_node;
	tm_node->parent_hw_id = UINT32_MAX;
	shaper_default_red_algo(dev, tm_node, profile);

	TAILQ_INSERT_TAIL(&dev->node_list, tm_node, node);

	return 0;
}

static int
nix_tm_clear_shaper_profiles(struct otx2_eth_dev *dev)
{
	struct otx2_nix_tm_shaper_profile *shaper_profile;

	while ((shaper_profile = TAILQ_FIRST(&dev->shaper_profile_list))) {
		if (shaper_profile->reference_count)
			otx2_tm_dbg("Shaper profile %u has non-zero references",
				    shaper_profile->shaper_profile_id);
		TAILQ_REMOVE(&dev->shaper_profile_list, shaper_profile, shaper);
		rte_free(shaper_profile);
	}

	return 0;
}

static int
nix_clear_path_xoff(struct otx2_eth_dev *dev,
		    struct otx2_nix_tm_node *tm_node)
{
	struct nix_txschq_config *req;
	struct otx2_nix_tm_node *p;
	int rc;

	/* Manipulating SW_XOFF is not supported on Ax */
	if (otx2_dev_is_Ax(dev))
		return 0;

	/* Enable nodes in the path for the flush to succeed */
	if (!nix_tm_is_leaf(dev, tm_node->lvl))
		p = tm_node;
	else
		p = tm_node->parent;
	while (p) {
		if (!(p->flags & NIX_TM_NODE_ENABLED) &&
		    (p->flags & NIX_TM_NODE_HWRES)) {
			req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
			req->lvl = p->hw_lvl;
			req->num_regs = prepare_tm_sw_xoff(p, false, req->reg,
							   req->regval);
			rc = otx2_mbox_process(dev->mbox);
			if (rc)
				return rc;

			p->flags |= NIX_TM_NODE_ENABLED;
		}
		p = p->parent;
	}

	return 0;
}

static int
nix_smq_xoff(struct otx2_eth_dev *dev,
	     struct otx2_nix_tm_node *tm_node,
	     bool enable)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_txschq_config *req;
	uint16_t smq;
	int rc;

	smq = tm_node->hw_id;
	otx2_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
		    enable ? "enable" : "disable");

	rc = nix_clear_path_xoff(dev, tm_node);
	if (rc)
		return rc;

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_SMQ;
	req->num_regs = 1;

	req->reg[0] = NIX_AF_SMQX_CFG(smq);
	req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
	req->regval_mask[0] = enable ?
				~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);

	return otx2_mbox_process(mbox);
}
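/*
 * A note on the NIX_AF_SMQX_CFG bits used above, inferred from their use
 * in this file: bit 50 stalls (xoff) enqueue into the SMQ and bit 49
 * requests a flush of packets already queued, so enable writes both bits
 * to stop and drain the queue, while disable clears only bit 50 to resume
 * normal operation.
 */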

int
otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable)
{
	struct otx2_eth_txq *txq = __txq;
	struct npa_aq_enq_req *req;
	struct npa_aq_enq_rsp *rsp;
	struct otx2_npa_lf *lf;
	struct otx2_mbox *mbox;
	uint64_t aura_handle;
	int rc;

	otx2_tm_dbg("Setting SQ %u SQB aura FC to %s", txq->sq,
		    enable ? "enable" : "disable");

	lf = otx2_npa_lf_obj_get();
	if (!lf)
		return -EFAULT;
	mbox = lf->mbox;
	/* Set/clear sqb aura fc_ena */
	aura_handle = txq->sqb_pool->pool_id;
	req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);

	req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_WRITE;
	/* The pool_addr is not needed for aura writes, but the AF driver
	 * expects it and translates it to the associated pool context.
	 */
	req->aura.pool_addr = req->aura_id;

	req->aura.fc_ena = enable;
	req->aura_mask.fc_ena = 1;

	rc = otx2_mbox_process(mbox);
	if (rc)
		return rc;

	/* Read back npa aura ctx */
	req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);

	req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_READ;

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Init fc_mem when enabling, as there might be no HW triggers yet */
	if (enable)
		*(volatile uint64_t *)txq->fc_mem = rsp->aura.count;
	else
		*(volatile uint64_t *)txq->fc_mem = txq->nb_sqb_bufs;
	/* Sync write barrier */
	rte_wmb();

	return 0;
}

static int
nix_txq_flush_sq_spin(struct otx2_eth_txq *txq)
{
	uint16_t sqb_cnt, head_off, tail_off;
	struct otx2_eth_dev *dev = txq->dev;
	uint64_t wdata, val, prev;
	uint16_t sq = txq->sq;
	int64_t *regaddr;
	uint64_t timeout; /* in tens of usec */

	/* Wait long enough for the queue to drain at the shaper minimum
	 * rate: nb_desc frames of NIX_MAX_HW_FRS bytes are converted to
	 * bits and divided by the minimum rate (assuming tm_rate_min is in
	 * bits per second), and the 1E5 factor expresses the result in
	 * units of the 10 us poll interval used below.
	 */
	timeout = (txq->qconf.nb_desc * NIX_MAX_HW_FRS * 8 * 1E5);
	timeout = timeout / dev->tm_rate_min;
	if (!timeout)
		timeout = 10000;

	wdata = ((uint64_t)sq << 32);
	regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS);
	val = otx2_atomic64_add_nosync(wdata, regaddr);

	/* Spin multiple iterations as "txq->fc_cache_pkts" can still
	 * have space to send pkts even though fc_mem is disabled
	 */
	while (true) {
		prev = val;
		rte_delay_us(10);
		val = otx2_atomic64_add_nosync(wdata, regaddr);
		/* Continue on error */
		if (val & BIT_ULL(63))
			continue;

		if (prev != val)
			continue;

		sqb_cnt = val & 0xFFFF;
		head_off = (val >> 20) & 0x3F;
		tail_off = (val >> 28) & 0x3F;

		/* SQ reached quiescent state */
		if (sqb_cnt <= 1 && head_off == tail_off &&
		    (*txq->fc_mem == txq->nb_sqb_bufs))
			break;

		/* Timeout */
		if (!timeout)
			goto exit;
		timeout--;
	}

	return 0;
exit:
	otx2_nix_tm_dump(dev);
	return -EFAULT;
}

/* Flush and disable tx queue and its parent SMQ */
int otx2_nix_sq_flush_pre(void *_txq, bool dev_started)
{
	struct otx2_nix_tm_node *tm_node, *sibling;
	struct otx2_eth_txq *txq;
	struct otx2_eth_dev *dev;
	uint16_t sq;
	bool user;
	int rc;

	txq = _txq;
	dev = txq->dev;
	sq = txq->sq;

	user = !!(dev->tm_flags & NIX_TM_COMMITTED);

	/* Find the node for this SQ */
	tm_node = nix_tm_node_search(dev, sq, user);
	if (!tm_node || !(tm_node->flags & NIX_TM_NODE_ENABLED)) {
		otx2_err("Invalid node/state for sq %u", sq);
		return -EFAULT;
	}

	/* Enable CGX RXTX to drain pkts */
	if (!dev_started) {
		/* Though this enables both RX MCAM entries and the CGX link,
		 * we assume all the rx queues were stopped earlier.
		 */
		otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
		rc = otx2_mbox_process(dev->mbox);
		if (rc) {
			otx2_err("cgx start failed, rc=%d", rc);
			return rc;
		}
	}

	/* Disable smq xoff in case it was enabled earlier */
	rc = nix_smq_xoff(dev, tm_node->parent, false);
	if (rc) {
		otx2_err("Failed to enable smq %u, rc=%d",
			 tm_node->parent->hw_id, rc);
		return rc;
	}

	/* As per the HRM, to disable an SQ, all other SQs
	 * that feed the same SMQ must be paused before the SMQ flush.
	 */
	TAILQ_FOREACH(sibling, &dev->node_list, node) {
		if (sibling->parent != tm_node->parent)
			continue;
		if (!(sibling->flags & NIX_TM_NODE_ENABLED))
			continue;

		sq = sibling->id;
		txq = dev->eth_dev->data->tx_queues[sq];
		if (!txq)
			continue;

		rc = otx2_nix_sq_sqb_aura_fc(txq, false);
		if (rc) {
			otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
			goto cleanup;
		}

		/* Wait for sq entries to be flushed */
		rc = nix_txq_flush_sq_spin(txq);
		if (rc) {
			otx2_err("Failed to drain sq %u, rc=%d", txq->sq, rc);
			goto cleanup;
		}
	}

	tm_node->flags &= ~NIX_TM_NODE_ENABLED;

	/* Disable and flush */
	rc = nix_smq_xoff(dev, tm_node->parent, true);
	if (rc) {
		otx2_err("Failed to disable smq %u, rc=%d",
			 tm_node->parent->hw_id, rc);
		goto cleanup;
	}
cleanup:
	/* Restore cgx state */
	if (!dev_started) {
		otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
		rc |= otx2_mbox_process(dev->mbox);
	}

	return rc;
}

int otx2_nix_sq_flush_post(void *_txq)
{
	struct otx2_nix_tm_node *tm_node, *sibling;
	struct otx2_eth_txq *txq = _txq;
	struct otx2_eth_txq *s_txq;
	struct otx2_eth_dev *dev;
	bool once = false;
	uint16_t sq, s_sq;
	bool user;
	int rc;

	dev = txq->dev;
	sq = txq->sq;
	user = !!(dev->tm_flags & NIX_TM_COMMITTED);

	/* Find the node for this SQ */
	tm_node = nix_tm_node_search(dev, sq, user);
	if (!tm_node) {
		otx2_err("Invalid node for sq %u", sq);
		return -EFAULT;
	}

	/* Enable all the siblings back */
	TAILQ_FOREACH(sibling, &dev->node_list, node) {
		if (sibling->parent != tm_node->parent)
			continue;

		if (sibling->id == sq)
			continue;

		if (!(sibling->flags & NIX_TM_NODE_ENABLED))
			continue;

		s_sq = sibling->id;
		s_txq = dev->eth_dev->data->tx_queues[s_sq];
		if (!s_txq)
			continue;

		if (!once) {
			/* Enable back if any SQ is still present */
			rc = nix_smq_xoff(dev, tm_node->parent, false);
			if (rc) {
				otx2_err("Failed to enable smq %u, rc=%d",
					 tm_node->parent->hw_id, rc);
				return rc;
			}
			once = true;
		}

		rc = otx2_nix_sq_sqb_aura_fc(s_txq, true);
		if (rc) {
			otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
			return rc;
		}
	}

	return 0;
}

static int
nix_sq_sched_data(struct otx2_eth_dev *dev,
		  struct otx2_nix_tm_node *tm_node,
		  bool rr_quantum_only)
{
	struct rte_eth_dev *eth_dev = dev->eth_dev;
	struct otx2_mbox *mbox = dev->mbox;
	uint16_t sq = tm_node->id, smq;
	struct nix_aq_enq_req *req;
	uint64_t rr_quantum;
	int rc;

	smq = tm_node->parent->hw_id;
	rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);

	if (rr_quantum_only)
		otx2_tm_dbg("Update sq(%u) rr_quantum 0x%"PRIx64,
			    sq, rr_quantum);
	else
		otx2_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%"PRIx64,
			    sq, smq, rr_quantum);

	if (sq >= eth_dev->data->nb_tx_queues)
		return -EFAULT;

	req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	req->qidx = sq;
	req->ctype = NIX_AQ_CTYPE_SQ;
	req->op = NIX_AQ_INSTOP_WRITE;

	/* SMQ update only when needed */
	if (!rr_quantum_only) {
		req->sq.smq = smq;
		req->sq_mask.smq = ~req->sq_mask.smq;
	}
	req->sq.smq_rr_quantum = rr_quantum;
	req->sq_mask.smq_rr_quantum = ~req->sq_mask.smq_rr_quantum;

	rc = otx2_mbox_process(mbox);
	if (rc)
		otx2_err("Failed to set smq, rc=%d", rc);
	return rc;
}

int otx2_nix_sq_enable(void *_txq)
{
	struct otx2_eth_txq *txq = _txq;
	int rc;

	/* Enable sqb_aura fc */
	rc = otx2_nix_sq_sqb_aura_fc(txq, true);
	if (rc) {
		otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
		return rc;
	}

	return 0;
}

static int
nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask,
		      uint32_t flags, bool hw_only)
{
	struct otx2_nix_tm_shaper_profile *profile;
	struct otx2_nix_tm_node *tm_node, *next_node;
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_txsch_free_req *req;
	uint32_t profile_id;
	int rc = 0;

	next_node = TAILQ_FIRST(&dev->node_list);
	while (next_node) {
		tm_node = next_node;
		next_node = TAILQ_NEXT(tm_node, node);

		/* Check for only requested nodes */
		if ((tm_node->flags & flags_mask) != flags)
			continue;

		if (!nix_tm_is_leaf(dev, tm_node->lvl) &&
		    tm_node->hw_lvl != NIX_TXSCH_LVL_TL1 &&
		    tm_node->flags & NIX_TM_NODE_HWRES) {
			/* Free specific HW resource */
			otx2_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
				    nix_hwlvl2str(tm_node->hw_lvl),
				    tm_node->hw_id, tm_node->lvl,
				    tm_node->id, tm_node);

			rc = nix_clear_path_xoff(dev, tm_node);
			if (rc)
				return rc;

			req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
			req->flags = 0;
			req->schq_lvl = tm_node->hw_lvl;
			req->schq = tm_node->hw_id;
			rc = otx2_mbox_process(mbox);
			if (rc)
				return rc;
			tm_node->flags &= ~NIX_TM_NODE_HWRES;
		}

		/* Leave software elements if needed */
		if (hw_only)
			continue;

		otx2_tm_dbg("Free node lvl %u id %u (%p)",
			    tm_node->lvl, tm_node->id, tm_node);

		profile_id = tm_node->params.shaper_profile_id;
		profile = nix_tm_shaper_profile_search(dev, profile_id);
		if (profile)
			profile->reference_count--;

		TAILQ_REMOVE(&dev->node_list, tm_node, node);
		rte_free(tm_node);
	}

	if (!flags_mask) {
		/* Free all hw resources */
		req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
		req->flags = TXSCHQ_FREE_ALL;

		return otx2_mbox_process(mbox);
	}

	return rc;
}

static uint8_t
nix_tm_copy_rsp_to_dev(struct otx2_eth_dev *dev,
		       struct nix_txsch_alloc_rsp *rsp)
{
	uint16_t schq;
	uint8_t lvl;

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) {
			dev->txschq_list[lvl][schq] = rsp->schq_list[lvl][schq];
			dev->txschq_contig_list[lvl][schq] =
				rsp->schq_contig_list[lvl][schq];
		}

		dev->txschq[lvl] = rsp->schq[lvl];
		dev->txschq_contig[lvl] = rsp->schq_contig[lvl];
	}
	return 0;
}

static int
nix_tm_assign_id_to_node(struct otx2_eth_dev *dev,
			 struct otx2_nix_tm_node *child,
			 struct otx2_nix_tm_node *parent)
{
	uint32_t hw_id, schq_con_index, prio_offset;
	uint32_t l_id, schq_index;

	otx2_tm_dbg("Assign hw id for child node %s lvl %u id %u (%p)",
		    nix_hwlvl2str(child->hw_lvl), child->lvl, child->id, child);

	child->flags |= NIX_TM_NODE_HWRES;

	/* Process root nodes */
	if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
	    child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
		int idx = 0;
		uint32_t tschq_con_index;

		l_id = child->hw_lvl;
		tschq_con_index = dev->txschq_contig_index[l_id];
		hw_id = dev->txschq_contig_list[l_id][tschq_con_index];
		child->hw_id = hw_id;
		dev->txschq_contig_index[l_id]++;
		/* Update TL1 hw_id for its parent for config purposes */
		idx = dev->txschq_index[NIX_TXSCH_LVL_TL1]++;
		hw_id = dev->txschq_list[NIX_TXSCH_LVL_TL1][idx];
		child->parent_hw_id = hw_id;
		return 0;
	}
	if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL1 &&
	    child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
		uint32_t tschq_con_index;

		l_id = child->hw_lvl;
		tschq_con_index = dev->txschq_index[l_id];
		hw_id = dev->txschq_list[l_id][tschq_con_index];
		child->hw_id = hw_id;
		dev->txschq_index[l_id]++;
		return 0;
	}

	/* Process children with parents */
	l_id = child->hw_lvl;
	schq_index = dev->txschq_index[l_id];
	schq_con_index = dev->txschq_contig_index[l_id];

	if (child->priority == parent->rr_prio) {
		hw_id = dev->txschq_list[l_id][schq_index];
		child->hw_id = hw_id;
		child->parent_hw_id = parent->hw_id;
		dev->txschq_index[l_id]++;
	} else {
		prio_offset = schq_con_index + child->priority;
		hw_id = dev->txschq_contig_list[l_id][prio_offset];
		child->hw_id = hw_id;
	}
	return 0;
}
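/*
 * Allocation scheme illustrated (derived from the branches above): a
 * child whose priority equals the parent's rr_prio draws the next free
 * HW id from the non-contiguous txschq_list, while strict-priority
 * children index the contiguous txschq_contig_list by their priority, so
 * siblings at priorities 0..max_prio land on consecutive scheduler
 * queues as find_prio_anchor() expects.
 */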

static int
nix_tm_assign_hw_id(struct otx2_eth_dev *dev)
{
	struct otx2_nix_tm_node *parent, *child;
	uint32_t child_hw_lvl, con_index_inc, i;

	for (i = NIX_TXSCH_LVL_TL1; i > 0; i--) {
		TAILQ_FOREACH(parent, &dev->node_list, node) {
			child_hw_lvl = parent->hw_lvl - 1;
			if (parent->hw_lvl != i)
				continue;
			TAILQ_FOREACH(child, &dev->node_list, node) {
				if (!child->parent)
					continue;
				if (child->parent->id != parent->id)
					continue;
				nix_tm_assign_id_to_node(dev, child, parent);
			}

			con_index_inc = parent->max_prio + 1;
			dev->txschq_contig_index[child_hw_lvl] += con_index_inc;

			/*
			 * Explicitly assign an id to the parent node if it
			 * doesn't have a parent
			 */
			if (parent->hw_lvl == dev->otx2_tm_root_lvl)
				nix_tm_assign_id_to_node(dev, parent, NULL);
		}
	}
	return 0;
}

static uint8_t
nix_tm_count_req_schq(struct otx2_eth_dev *dev,
		      struct nix_txsch_alloc_req *req, uint8_t lvl)
{
	struct otx2_nix_tm_node *tm_node;
	uint8_t contig_count;

	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
		if (lvl == tm_node->hw_lvl) {
			req->schq[lvl - 1] += tm_node->rr_num;
			if (tm_node->max_prio != UINT32_MAX) {
				contig_count = tm_node->max_prio + 1;
				req->schq_contig[lvl - 1] += contig_count;
			}
		}
		if (lvl == dev->otx2_tm_root_lvl &&
		    dev->otx2_tm_root_lvl && lvl == NIX_TXSCH_LVL_TL2 &&
		    tm_node->hw_lvl == dev->otx2_tm_root_lvl) {
			req->schq_contig[dev->otx2_tm_root_lvl]++;
		}
	}

	req->schq[NIX_TXSCH_LVL_TL1] = 1;
	req->schq_contig[NIX_TXSCH_LVL_TL1] = 0;

	return 0;
}

static int
nix_tm_prepare_txschq_req(struct otx2_eth_dev *dev,
			  struct nix_txsch_alloc_req *req)
{
	uint8_t i;

	for (i = NIX_TXSCH_LVL_TL1; i > 0; i--)
		nix_tm_count_req_schq(dev, req, i);

	for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
		dev->txschq_index[i] = 0;
		dev->txschq_contig_index[i] = 0;
	}
	return 0;
}

static int
nix_tm_send_txsch_alloc_msg(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	int rc;

	req = otx2_mbox_alloc_msg_nix_txsch_alloc(mbox);

	rc = nix_tm_prepare_txschq_req(dev, req);
	if (rc)
		return rc;

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	nix_tm_copy_rsp_to_dev(dev, rsp);
	dev->link_cfg_lvl = rsp->link_cfg_lvl;

	nix_tm_assign_hw_id(dev);
	return 0;
}
1469
1470 static int
1471 nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)
1472 {
1473         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1474         struct otx2_nix_tm_node *tm_node;
1475         struct otx2_eth_txq *txq;
1476         uint16_t sq;
1477         int rc;
1478
1479         nix_tm_update_parent_info(dev);
1480
1481         rc = nix_tm_send_txsch_alloc_msg(dev);
1482         if (rc) {
1483                 otx2_err("TM failed to alloc tm resources=%d", rc);
1484                 return rc;
1485         }
1486
1487         rc = nix_tm_txsch_reg_config(dev);
1488         if (rc) {
1489                 otx2_err("TM failed to configure sched registers=%d", rc);
1490                 return rc;
1491         }
1492
1493         /* Trigger MTU recalculate as SMQ needs MTU conf */
1494         if (eth_dev->data->dev_started && eth_dev->data->nb_rx_queues) {
1495                 rc = otx2_nix_recalc_mtu(eth_dev);
1496                 if (rc) {
1497                         otx2_err("TM MTU update failed, rc=%d", rc);
1498                         return rc;
1499                 }
1500         }
1501
1502         /* Mark all non-leaf's as enabled */
1503         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1504                 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1505                         tm_node->flags |= NIX_TM_NODE_ENABLED;
1506         }
1507
1508         if (!xmit_enable)
1509                 return 0;
1510
1511         /* Update SQ Sched Data while SQ is idle */
1512         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1513                 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1514                         continue;
1515
1516                 rc = nix_sq_sched_data(dev, tm_node, false);
1517                 if (rc) {
1518                         otx2_err("SQ %u sched update failed, rc=%d",
1519                                  tm_node->id, rc);
1520                         return rc;
1521                 }
1522         }
1523
1524         /* Finally XON all SMQ's */
1525         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1526                 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1527                         continue;
1528
1529                 rc = nix_smq_xoff(dev, tm_node, false);
1530                 if (rc) {
1531                         otx2_err("Failed to enable smq %u, rc=%d",
1532                                  tm_node->hw_id, rc);
1533                         return rc;
1534                 }
1535         }
1536
1537         /* Enable xmit as all the topology is ready */
1538         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1539                 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1540                         continue;
1541
1542                 sq = tm_node->id;
1543                 txq = eth_dev->data->tx_queues[sq];
1544
1545                 rc = otx2_nix_sq_enable(txq);
1546                 if (rc) {
1547                         otx2_err("TM sw xon failed on SQ %u, rc=%d",
1548                                  tm_node->id, rc);
1549                         return rc;
1550                 }
1551                 tm_node->flags |= NIX_TM_NODE_ENABLED;
1552         }
1553
1554         return 0;
1555 }
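/*
 * Bring-up order used above, restated for reference (no extra behavior):
 * allocate HW scheduler queues via mbox, program the scheduling/shaping
 * registers, recompute MTU for the SMQs, then (when xmit_enable is set)
 * push SQ sched data, XON the SMQs and finally enable the SQs, so the
 * whole topology is in place before any traffic starts.
 */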
1556
1557 static int
1558 send_tm_reqval(struct otx2_mbox *mbox,
1559                struct nix_txschq_config *req,
1560                struct rte_tm_error *error)
1561 {
1562         int rc;
1563
1564         if (!req->num_regs ||
1565             req->num_regs > MAX_REGS_PER_MBOX_MSG) {
1566                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1567                 error->message = "invalid config";
1568                 return -EIO;
1569         }
1570
1571         rc = otx2_mbox_process(mbox);
1572         if (rc) {
1573                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1574                 error->message = "unexpected fatal error";
1575         }
1576         return rc;
1577 }
1578
1579 static uint16_t
1580 nix_tm_lvl2nix(struct otx2_eth_dev *dev, uint32_t lvl)
1581 {
1582         if (nix_tm_have_tl1_access(dev)) {
1583                 switch (lvl) {
1584                 case OTX2_TM_LVL_ROOT:
1585                         return NIX_TXSCH_LVL_TL1;
1586                 case OTX2_TM_LVL_SCH1:
1587                         return NIX_TXSCH_LVL_TL2;
1588                 case OTX2_TM_LVL_SCH2:
1589                         return NIX_TXSCH_LVL_TL3;
1590                 case OTX2_TM_LVL_SCH3:
1591                         return NIX_TXSCH_LVL_TL4;
1592                 case OTX2_TM_LVL_SCH4:
1593                         return NIX_TXSCH_LVL_SMQ;
1594                 default:
1595                         return NIX_TXSCH_LVL_CNT;
1596                 }
1597         } else {
1598                 switch (lvl) {
1599                 case OTX2_TM_LVL_ROOT:
1600                         return NIX_TXSCH_LVL_TL2;
1601                 case OTX2_TM_LVL_SCH1:
1602                         return NIX_TXSCH_LVL_TL3;
1603                 case OTX2_TM_LVL_SCH2:
1604                         return NIX_TXSCH_LVL_TL4;
1605                 case OTX2_TM_LVL_SCH3:
1606                         return NIX_TXSCH_LVL_SMQ;
1607                 default:
1608                         return NIX_TXSCH_LVL_CNT;
1609                 }
1610         }
1611 }
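/*
 * Level mapping implemented above, summarized (a restatement of the
 * switch cases, not additional behavior):
 *
 *   rte_tm level      | with TL1 access | without TL1 access
 *   OTX2_TM_LVL_ROOT  | TL1             | TL2
 *   OTX2_TM_LVL_SCH1  | TL2             | TL3
 *   OTX2_TM_LVL_SCH2  | TL3             | TL4
 *   OTX2_TM_LVL_SCH3  | TL4             | SMQ
 *   OTX2_TM_LVL_SCH4  | SMQ             | leaf (SQ)
 *   OTX2_TM_LVL_QUEUE | leaf (SQ)       | -
 */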
1612
1613 static uint16_t
1614 nix_max_prio(struct otx2_eth_dev *dev, uint16_t hw_lvl)
1615 {
1616         if (hw_lvl >= NIX_TXSCH_LVL_CNT)
1617                 return 0;
1618
1619         /* MDQ doesn't support SP */
1620         if (hw_lvl == NIX_TXSCH_LVL_MDQ)
1621                 return 0;
1622
1623         /* PF's TL1 doesn't support SP when VFs are enabled */
1624         if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
1625             (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
1626              (dev->tm_flags & NIX_TM_TL1_NO_SP)))
1627                 return 0;
1628
1629         return TXSCH_TLX_SP_PRIO_MAX - 1;
1630 }
1631
1632
1633 static int
1634 validate_prio(struct otx2_eth_dev *dev, uint32_t lvl,
1635               uint32_t parent_id, uint32_t priority,
1636               struct rte_tm_error *error)
1637 {
1638         uint8_t priorities[TXSCH_TLX_SP_PRIO_MAX];
1639         struct otx2_nix_tm_node *tm_node;
1640         uint32_t rr_num = 0;
1641         int i;
1642
1643         /* Validate priority against max */
1644         if (priority > nix_max_prio(dev, nix_tm_lvl2nix(dev, lvl - 1))) {
1645                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
1646                 error->message = "unsupported priority value";
1647                 return -EINVAL;
1648         }
1649
1650         if (parent_id == RTE_TM_NODE_ID_NULL)
1651                 return 0;
1652
1653         memset(priorities, 0, TXSCH_TLX_SP_PRIO_MAX);
1654         priorities[priority] = 1;
1655
1656         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1657                 if (!tm_node->parent)
1658                         continue;
1659
1660                 if (!(tm_node->flags & NIX_TM_NODE_USER))
1661                         continue;
1662
1663                 if (tm_node->parent->id != parent_id)
1664                         continue;
1665
1666                 priorities[tm_node->priority]++;
1667         }
1668
1669         for (i = 0; i < TXSCH_TLX_SP_PRIO_MAX; i++)
1670                 if (priorities[i] > 1)
1671                         rr_num++;
1672
1673         /* At most one RR group is allowed per parent */
1674         if (rr_num > 1) {
1675                 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
1676                 error->message = "multiple DWRR node priority";
1677                 return -EINVAL;
1678         }
1679
1680         /* Check for previous priority to avoid holes in priorities */
1681         if (priority && !priorities[priority - 1]) {
1682                 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
1683                 error->message = "priority not in order";
1684                 return -EINVAL;
1685         }
1686
1687         return 0;
1688 }
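/*
 * Example of what validate_prio() accepts, derived from the checks above:
 * with four children under one parent, priorities {0, 1, 1, 2} are valid
 * (a single DWRR group at priority 1), while {0, 0, 1, 1} is rejected
 * (two DWRR groups) and adding a priority-2 child next to a lone
 * priority-0 sibling is rejected (hole at priority 1).
 */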
1689
1690 static int
1691 read_tm_reg(struct otx2_mbox *mbox, uint64_t reg,
1692             uint64_t *regval, uint32_t hw_lvl)
1693 {
1694         volatile struct nix_txschq_config *req;
1695         struct nix_txschq_config *rsp;
1696         int rc;
1697
1698         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
1699         req->read = 1;
1700         req->lvl = hw_lvl;
1701         req->reg[0] = reg;
1702         req->num_regs = 1;
1703
1704         rc = otx2_mbox_process_msg(mbox, (void **)&rsp);
1705         if (rc)
1706                 return rc;
1707         *regval = rsp->regval[0];
1708         return 0;
1709 }
1710
1711 /* Search for min rate in topology */
1712 static void
1713 nix_tm_shaper_profile_update_min(struct otx2_eth_dev *dev)
1714 {
1715         struct otx2_nix_tm_shaper_profile *profile;
1716         uint64_t rate_min = 1E9; /* 1 Gbps */
1717
1718         TAILQ_FOREACH(profile, &dev->shaper_profile_list, shaper) {
1719                 if (profile->params.peak.rate &&
1720                     profile->params.peak.rate < rate_min)
1721                         rate_min = profile->params.peak.rate;
1722
1723                 if (profile->params.committed.rate &&
1724                     profile->params.committed.rate < rate_min)
1725                         rate_min = profile->params.committed.rate;
1726         }
1727
1728         dev->tm_rate_min = rate_min;
1729 }
1730
1731 static int
1732 nix_xmit_disable(struct rte_eth_dev *eth_dev)
1733 {
1734         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1735         uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
1736         uint16_t sqb_cnt, head_off, tail_off;
1737         struct otx2_nix_tm_node *tm_node;
1738         struct otx2_eth_txq *txq;
1739         uint64_t wdata, val;
1740         int i, rc = 0;
1741
1742         otx2_tm_dbg("Disabling xmit on %s", eth_dev->data->name);
1743
1744         /* Enable CGX RXTX to drain pkts */
1745         if (!eth_dev->data->dev_started) {
1746                 otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
1747                 rc = otx2_mbox_process(dev->mbox);
1748                 if (rc)
1749                         return rc;
1750         }
1751
1752         /* XON all SMQs */
1753         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1754                 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1755                         continue;
1756                 if (!(tm_node->flags & NIX_TM_NODE_HWRES))
1757                         continue;
1758
1759                 rc = nix_smq_xoff(dev, tm_node, false);
1760                 if (rc) {
1761                         otx2_err("Failed to enable smq %u, rc=%d",
1762                                  tm_node->hw_id, rc);
1763                         goto cleanup;
1764                 }
1765         }
1766
1767         /* Flush all tx queues */
1768         for (i = 0; i < sq_cnt; i++) {
1769                 txq = eth_dev->data->tx_queues[i];
1770
1771                 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
1772                 if (rc) {
1773                         otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
1774                         goto cleanup;
1775                 }
1776
1777                 /* Wait for sq entries to be flushed */
1778                 rc = nix_txq_flush_sq_spin(txq);
1779                 if (rc) {
1780                         otx2_err("Failed to drain sq, rc=%d\n", rc);
1781                         goto cleanup;
1782                 }
1783         }
1784
1785         /* XOFF & flush all SMQs. The HRM mandates that
1786          * all SQs be empty before an SMQ flush is issued.
1787          */
1788         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1789                 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1790                         continue;
1791                 if (!(tm_node->flags & NIX_TM_NODE_HWRES))
1792                         continue;
1793
1794                 rc = nix_smq_xoff(dev, tm_node, true);
1795                 if (rc) {
1796                         otx2_err("Failed to disable smq %u, rc=%d",
1797                                  tm_node->hw_id, rc);
1798                         goto cleanup;
1799                 }
1800         }
1801
1802         /* Verify sanity of all tx queues */
1803         for (i = 0; i < sq_cnt; i++) {
1804                 txq = eth_dev->data->tx_queues[i];
1805
1806                 wdata = ((uint64_t)txq->sq << 32);
1807                 val = otx2_atomic64_add_nosync(wdata,
1808                                (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS));
1809
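                /* Decode of NIX_LF_SQ_OP_STATUS as used below: SQB count
                 * in bits [15:0], head offset in bits [25:20], tail offset
                 * in bits [33:28]. A fully drained SQ is expected to hold
                 * at most one SQB, with head == tail and all SQB buffers
                 * returned to the pool (*fc_mem == nb_sqb_bufs).
                 */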
1810                 sqb_cnt = val & 0xFFFF;
1811                 head_off = (val >> 20) & 0x3F;
1812                 tail_off = (val >> 28) & 0x3F;
1813
1814                 if (sqb_cnt > 1 || head_off != tail_off ||
1815                     (*txq->fc_mem != txq->nb_sqb_bufs))
1816                         otx2_err("Failed to gracefully flush sq %u", txq->sq);
1817         }
1818
1819 cleanup:
1820         /* restore cgx state */
1821         if (!eth_dev->data->dev_started) {
1822                 otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
1823                 rc |= otx2_mbox_process(dev->mbox);
1824         }
1825
1826         return rc;
1827 }
1828
1829 static int
1830 otx2_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
1831                           int *is_leaf, struct rte_tm_error *error)
1832 {
1833         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1834         struct otx2_nix_tm_node *tm_node;
1835
1836         if (is_leaf == NULL) {
1837                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1838                 return -EINVAL;
1839         }
1840
1841         tm_node = nix_tm_node_search(dev, node_id, true);
1842         if (node_id == RTE_TM_NODE_ID_NULL || !tm_node) {
1843                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
1844                 return -EINVAL;
1845         }
1846         if (nix_tm_is_leaf(dev, tm_node->lvl))
1847                 *is_leaf = true;
1848         else
1849                 *is_leaf = false;
1850         return 0;
1851 }
1852
1853 static int
1854 otx2_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
1855                      struct rte_tm_capabilities *cap,
1856                      struct rte_tm_error *error)
1857 {
1858         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1859         struct otx2_mbox *mbox = dev->mbox;
1860         int rc, max_nr_nodes = 0, i;
1861         struct free_rsrcs_rsp *rsp;
1862
1863         memset(cap, 0, sizeof(*cap));
1864
1865         otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
1866         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1867         if (rc) {
1868                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1869                 error->message = "unexpected fatal error";
1870                 return rc;
1871         }
1872
1873         for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
1874                 max_nr_nodes += rsp->schq[i];
1875
1876         cap->n_nodes_max = max_nr_nodes + dev->tm_leaf_cnt;
1877         /* TL1 level is reserved for PF */
1878         cap->n_levels_max = nix_tm_have_tl1_access(dev) ?
1879                                 OTX2_TM_LVL_MAX : OTX2_TM_LVL_MAX - 1;
1880         cap->non_leaf_nodes_identical = 1;
1881         cap->leaf_nodes_identical = 1;
1882
1883         /* Shaper Capabilities */
1884         cap->shaper_private_n_max = max_nr_nodes;
1885         cap->shaper_n_max = max_nr_nodes;
1886         cap->shaper_private_dual_rate_n_max = max_nr_nodes;
1887         cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1888         cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1889         cap->shaper_pkt_length_adjust_min = 0;
1890         cap->shaper_pkt_length_adjust_max = 0;
1891
1892         /* Schedule Capabilities */
1893         cap->sched_n_children_max = rsp->schq[NIX_TXSCH_LVL_MDQ];
1894         cap->sched_sp_n_priorities_max = TXSCH_TLX_SP_PRIO_MAX;
1895         cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
1896         cap->sched_wfq_n_groups_max = 1;
1897         cap->sched_wfq_weight_max = MAX_SCHED_WEIGHT;
1898
1899         cap->dynamic_update_mask =
1900                 RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
1901                 RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
1902         cap->stats_mask =
1903                 RTE_TM_STATS_N_PKTS |
1904                 RTE_TM_STATS_N_BYTES |
1905                 RTE_TM_STATS_N_PKTS_RED_DROPPED |
1906                 RTE_TM_STATS_N_BYTES_RED_DROPPED;
1907
1908         for (i = 0; i < RTE_COLORS; i++) {
1909                 cap->mark_vlan_dei_supported[i] = false;
1910                 cap->mark_ip_ecn_tcp_supported[i] = false;
1911                 cap->mark_ip_dscp_supported[i] = false;
1912         }
1913
1914         return 0;
1915 }
1916
1917 static int
1918 otx2_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
1919                                    struct rte_tm_level_capabilities *cap,
1920                                    struct rte_tm_error *error)
1921 {
1922         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1923         struct otx2_mbox *mbox = dev->mbox;
1924         struct free_rsrcs_rsp *rsp;
1925         uint16_t hw_lvl;
1926         int rc;
1927
1928         memset(cap, 0, sizeof(*cap));
1929
1930         otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
1931         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1932         if (rc) {
1933                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1934                 error->message = "unexpected fatal error";
1935                 return rc;
1936         }
1937
1938         hw_lvl = nix_tm_lvl2nix(dev, lvl);
1939
1940         if (nix_tm_is_leaf(dev, lvl)) {
1941                 /* Leaf */
1942                 cap->n_nodes_max = dev->tm_leaf_cnt;
1943                 cap->n_nodes_leaf_max = dev->tm_leaf_cnt;
1944                 cap->leaf_nodes_identical = 1;
1945                 cap->leaf.stats_mask =
1946                         RTE_TM_STATS_N_PKTS |
1947                         RTE_TM_STATS_N_BYTES;
1948
1949         } else if (lvl == OTX2_TM_LVL_ROOT) {
1950                 /* Root node, aka TL2(vf)/TL1(pf) */
1951                 cap->n_nodes_max = 1;
1952                 cap->n_nodes_nonleaf_max = 1;
1953                 cap->non_leaf_nodes_identical = 1;
1954
1955                 cap->nonleaf.shaper_private_supported = true;
1956                 cap->nonleaf.shaper_private_dual_rate_supported =
1957                         nix_tm_have_tl1_access(dev) ? false : true;
1958                 cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1959                 cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1960
1961                 cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
1962                 cap->nonleaf.sched_sp_n_priorities_max =
1963                                         nix_max_prio(dev, hw_lvl) + 1;
1964                 cap->nonleaf.sched_wfq_n_groups_max = 1;
1965                 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
1966
1967                 if (nix_tm_have_tl1_access(dev))
1968                         cap->nonleaf.stats_mask =
1969                                 RTE_TM_STATS_N_PKTS_RED_DROPPED |
1970                                 RTE_TM_STATS_N_BYTES_RED_DROPPED;
1971         } else if ((lvl < OTX2_TM_LVL_MAX) &&
1972                    (hw_lvl < NIX_TXSCH_LVL_CNT)) {
1973                 /* TL2, TL3, TL4, MDQ */
1974                 cap->n_nodes_max = rsp->schq[hw_lvl];
1975                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
1976                 cap->non_leaf_nodes_identical = 1;
1977
1978                 cap->nonleaf.shaper_private_supported = true;
1979                 cap->nonleaf.shaper_private_dual_rate_supported = true;
1980                 cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1981                 cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1982
1983                 /* MDQ doesn't support Strict Priority */
1984                 if (hw_lvl == NIX_TXSCH_LVL_MDQ)
1985                         cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
1986                 else
1987                         cap->nonleaf.sched_n_children_max =
1988                                 rsp->schq[hw_lvl - 1];
1989                 cap->nonleaf.sched_sp_n_priorities_max =
1990                         nix_max_prio(dev, hw_lvl) + 1;
1991                 cap->nonleaf.sched_wfq_n_groups_max = 1;
1992                 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
1993         } else {
1994                 /* unsupported level */
1995                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1996                 return -EINVAL;
1997         }
1998         return 0;
1999 }
2000
2001 static int
2002 otx2_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
2003                           struct rte_tm_node_capabilities *cap,
2004                           struct rte_tm_error *error)
2005 {
2006         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2007         struct otx2_mbox *mbox = dev->mbox;
2008         struct otx2_nix_tm_node *tm_node;
2009         struct free_rsrcs_rsp *rsp;
2010         int rc, hw_lvl, lvl;
2011
2012         memset(cap, 0, sizeof(*cap));
2013
2014         tm_node = nix_tm_node_search(dev, node_id, true);
2015         if (!tm_node) {
2016                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2017                 error->message = "no such node";
2018                 return -EINVAL;
2019         }
2020
2021         hw_lvl = tm_node->hw_lvl;
2022         lvl = tm_node->lvl;
2023
2024         /* Leaf node */
2025         if (nix_tm_is_leaf(dev, lvl)) {
2026                 cap->stats_mask = RTE_TM_STATS_N_PKTS |
2027                                         RTE_TM_STATS_N_BYTES;
2028                 return 0;
2029         }
2030
2031         otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
2032         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
2033         if (rc) {
2034                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2035                 error->message = "unexpected fatal error";
2036                 return rc;
2037         }
2038
2039         /* Non Leaf Shaper */
2040         cap->shaper_private_supported = true;
2041         cap->shaper_private_dual_rate_supported =
2042                 (hw_lvl == NIX_TXSCH_LVL_TL1) ? false : true;
2043         cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
2044         cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
2045
2046         /* Non Leaf Scheduler */
2047         if (hw_lvl == NIX_TXSCH_LVL_MDQ)
2048                 cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
2049         else
2050                 cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
2051
2052         cap->nonleaf.sched_sp_n_priorities_max = nix_max_prio(dev, hw_lvl) + 1;
2053         cap->nonleaf.sched_wfq_n_children_per_group_max =
2054                 cap->nonleaf.sched_n_children_max;
2055         cap->nonleaf.sched_wfq_n_groups_max = 1;
2056         cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
2057
2058         if (hw_lvl == NIX_TXSCH_LVL_TL1)
2059                 cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
2060                         RTE_TM_STATS_N_BYTES_RED_DROPPED;
2061         return 0;
2062 }
2063
2064 static int
2065 otx2_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev,
2066                                uint32_t profile_id,
2067                                struct rte_tm_shaper_params *params,
2068                                struct rte_tm_error *error)
2069 {
2070         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2071         struct otx2_nix_tm_shaper_profile *profile;
2072
2073         profile = nix_tm_shaper_profile_search(dev, profile_id);
2074         if (profile) {
2075                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2076                 error->message = "shaper profile ID already exists";
2077                 return -EINVAL;
2078         }
2079
2080         /* Committed rate and burst size can be enabled/disabled */
2081         if (params->committed.size || params->committed.rate) {
2082                 if (params->committed.size < MIN_SHAPER_BURST ||
2083                     params->committed.size > MAX_SHAPER_BURST) {
2084                         error->type =
2085                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
2086                         return -EINVAL;
2087                 } else if (!shaper_rate_to_nix(params->committed.rate * 8,
2088                                                NULL, NULL, NULL)) {
2089                         error->type =
2090                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
2091                         error->message = "shaper committed rate invalid";
2092                         return -EINVAL;
2093                 }
2094         }
2095
2096         /* Peak rate and burst size can be enabled/disabled */
2097         if (params->peak.size || params->peak.rate) {
2098                 if (params->peak.size < MIN_SHAPER_BURST ||
2099                     params->peak.size > MAX_SHAPER_BURST) {
2100                         error->type =
2101                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
2102                         return -EINVAL;
2103                 } else if (!shaper_rate_to_nix(params->peak.rate * 8,
2104                                                NULL, NULL, NULL)) {
2105                         error->type =
2106                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
2107                         error->message = "shaper peak rate invalid";
2108                         return -EINVAL;
2109                 }
2110         }
2111
2112         profile = rte_zmalloc("otx2_nix_tm_shaper_profile",
2113                               sizeof(struct otx2_nix_tm_shaper_profile), 0);
2114         if (!profile)
2115                 return -ENOMEM;
2116
2117         profile->shaper_profile_id = profile_id;
2118         rte_memcpy(&profile->params, params,
2119                    sizeof(struct rte_tm_shaper_params));
2120         TAILQ_INSERT_TAIL(&dev->shaper_profile_list, profile, shaper);
2121
2122         otx2_tm_dbg("Added TM shaper profile %u, "
2123                     "pir %" PRIu64 ", pbs %" PRIu64 ", cir %" PRIu64
2124                     ", cbs %" PRIu64 ", adj %u",
2125                     profile_id,
2126                     params->peak.rate * 8,
2127                     params->peak.size,
2128                     params->committed.rate * 8,
2129                     params->committed.size,
2130                     params->pkt_length_adjust);
2131
2132         /* Translate rate as bits per second */
2133         profile->params.peak.rate = profile->params.peak.rate * 8;
2134         profile->params.committed.rate = profile->params.committed.rate * 8;
2135         /* Always use PIR for single rate shaping */
2136         if (!params->peak.rate && params->committed.rate) {
2137                 profile->params.peak = profile->params.committed;
2138                 memset(&profile->params.committed, 0,
2139                        sizeof(profile->params.committed));
2140         }
2141
2142         /* update min rate */
2143         nix_tm_shaper_profile_update_min(dev);
2144         return 0;
2145 }
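/*
 * Illustrative usage sketch (not part of the driver): rte_tm shaper rates
 * are specified in bytes per second, while NIX shaping works in bits per
 * second, hence the "* 8" translation above. A minimal single-rate
 * 100 Mbps profile could look like the following; the profile id and
 * burst size are arbitrary example values:
 *
 *     struct rte_tm_error err;
 *     struct rte_tm_shaper_params sp;
 *
 *     memset(&sp, 0, sizeof(sp));
 *     sp.peak.rate = 100000000 / 8;   // 100 Mbps expressed in bytes/sec
 *     sp.peak.size = 4096;            // burst size in bytes
 *     rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
 *
 * Since only the peak branch is set, the committed checks above are
 * skipped and the profile programs PIR alone.
 */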
2146
2147 static int
2148 otx2_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
2149                                   uint32_t profile_id,
2150                                   struct rte_tm_error *error)
2151 {
2152         struct otx2_nix_tm_shaper_profile *profile;
2153         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2154
2155         profile = nix_tm_shaper_profile_search(dev, profile_id);
2156
2157         if (!profile) {
2158                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2159                 error->message = "shaper profile ID does not exist";
2160                 return -EINVAL;
2161         }
2162
2163         if (profile->reference_count) {
2164                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
2165                 error->message = "shaper profile in use";
2166                 return -EINVAL;
2167         }
2168
2169         otx2_tm_dbg("Removing TM shaper profile %u", profile_id);
2170         TAILQ_REMOVE(&dev->shaper_profile_list, profile, shaper);
2171         rte_free(profile);
2172
2173         /* update min rate */
2174         nix_tm_shaper_profile_update_min(dev);
2175         return 0;
2176 }
2177
2178 static int
2179 otx2_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
2180                      uint32_t parent_node_id, uint32_t priority,
2181                      uint32_t weight, uint32_t lvl,
2182                      struct rte_tm_node_params *params,
2183                      struct rte_tm_error *error)
2184 {
2185         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2186         struct otx2_nix_tm_node *parent_node;
2187         int rc, clear_on_fail = 0;
2188         uint32_t exp_next_lvl;
2189         uint16_t hw_lvl;
2190
2191         /* Dynamic node add after hierarchy commit is not supported */
2192         if (dev->tm_flags & NIX_TM_COMMITTED) {
2193                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2194                 error->message = "dynamic update not supported";
2195                 return -EIO;
2196         }
2197
2198         /* Leaf nodes must all be at priority 0 */
2199         if (nix_tm_is_leaf(dev, lvl) && priority != 0) {
2200                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2201                 error->message = "queue shapers must be priority 0";
2202                 return -EIO;
2203         }
2204
2205         parent_node = nix_tm_node_search(dev, parent_node_id, true);
2206
2207         /* find the right level */
2208         if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
2209                 if (parent_node_id == RTE_TM_NODE_ID_NULL) {
2210                         lvl = OTX2_TM_LVL_ROOT;
2211                 } else if (parent_node) {
2212                         lvl = parent_node->lvl + 1;
2213                 } else {
2214                         /* Neither a proper parent nor a proper level id given */
2215                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2216                         error->message = "invalid parent node id";
2217                         return -ERANGE;
2218                 }
2219         }
2220
2221         /* Translate rte_tm level id's to nix hw level id's */
2222         hw_lvl = nix_tm_lvl2nix(dev, lvl);
2223         if (hw_lvl == NIX_TXSCH_LVL_CNT &&
2224             !nix_tm_is_leaf(dev, lvl)) {
2225                 error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
2226                 error->message = "invalid level id";
2227                 return -ERANGE;
2228         }
2229
2230         if (node_id < dev->tm_leaf_cnt)
2231                 exp_next_lvl = NIX_TXSCH_LVL_SMQ;
2232         else
2233                 exp_next_lvl = hw_lvl + 1;
2234
2235         /* Validate that the parent exists at the expected level */
2236         if (hw_lvl != dev->otx2_tm_root_lvl &&
2237             (!parent_node || parent_node->hw_lvl != exp_next_lvl)) {
2238                 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2239                 error->message = "invalid parent node id";
2240                 return -EINVAL;
2241         }
2242
2243         /* Check if a node already exists */
2244         if (nix_tm_node_search(dev, node_id, true)) {
2245                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2246                 error->message = "node already exists";
2247                 return -EINVAL;
2248         }
2249
2250         /* Check if shaper profile exists for non leaf node */
2251         if (!nix_tm_is_leaf(dev, lvl) &&
2252             params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE &&
2253             !nix_tm_shaper_profile_search(dev, params->shaper_profile_id)) {
2254                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2255                 error->message = "invalid shaper profile";
2256                 return -EINVAL;
2257         }
2258
2259         /* Reject a second DWRR group among siblings and holes in priority */
2260         if (validate_prio(dev, lvl, parent_node_id, priority, error))
2261                 return -EINVAL;
2262
2263         if (weight > MAX_SCHED_WEIGHT) {
2264                 error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
2265                 error->message = "max weight exceeded";
2266                 return -EINVAL;
2267         }
2268
2269         rc = nix_tm_node_add_to_list(dev, node_id, parent_node_id,
2270                                      priority, weight, hw_lvl,
2271                                      lvl, true, params);
2272         if (rc) {
2273                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2274                 /* cleanup user added nodes */
2275                 if (clear_on_fail)
2276                         nix_tm_free_resources(dev, NIX_TM_NODE_USER,
2277                                               NIX_TM_NODE_USER, false);
2278                 error->message = "failed to add node";
2279                 return rc;
2280         }
2281         error->type = RTE_TM_ERROR_TYPE_NONE;
2282         return 0;
2283 }
2284
2285 static int
2286 otx2_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id,
2287                         struct rte_tm_error *error)
2288 {
2289         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2290         struct otx2_nix_tm_node *tm_node, *child_node;
2291         struct otx2_nix_tm_shaper_profile *profile;
2292         uint32_t profile_id;
2293
2294         /* Dynamic node delete after hierarchy commit is not supported */
2295         if (dev->tm_flags & NIX_TM_COMMITTED) {
2296                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2297                 error->message = "hierarchy exists";
2298                 return -EIO;
2299         }
2300
2301         if (node_id == RTE_TM_NODE_ID_NULL) {
2302                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2303                 error->message = "invalid node id";
2304                 return -EINVAL;
2305         }
2306
2307         tm_node = nix_tm_node_search(dev, node_id, true);
2308         if (!tm_node) {
2309                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2310                 error->message = "no such node";
2311                 return -EINVAL;
2312         }
2313
2314         /* Check for any existing children */
2315         TAILQ_FOREACH(child_node, &dev->node_list, node) {
2316                 if (child_node->parent == tm_node) {
2317                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2318                         error->message = "children exist";
2319                         return -EINVAL;
2320                 }
2321         }
2322
2323         /* Remove shaper profile reference */
2324         profile_id = tm_node->params.shaper_profile_id;
2325         profile = nix_tm_shaper_profile_search(dev, profile_id);
2326         if (profile)
2327                 profile->reference_count--;
2328         TAILQ_REMOVE(&dev->node_list, tm_node, node);
2329         rte_free(tm_node);
2330         return 0;
2331 }
2332
2333 static int
2334 nix_tm_node_suspend_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
2335                            struct rte_tm_error *error, bool suspend)
2336 {
2337         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2338         struct otx2_mbox *mbox = dev->mbox;
2339         struct otx2_nix_tm_node *tm_node;
2340         struct nix_txschq_config *req;
2341         uint16_t flags;
2342         int rc;
2343
2344         tm_node = nix_tm_node_search(dev, node_id, true);
2345         if (!tm_node) {
2346                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2347                 error->message = "no such node";
2348                 return -EINVAL;
2349         }
2350
2351         if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
2352                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2353                 error->message = "hierarchy doesn't exist";
2354                 return -EINVAL;
2355         }
2356
2357         flags = tm_node->flags;
2358         flags = suspend ? (flags & ~NIX_TM_NODE_ENABLED) :
2359                 (flags | NIX_TM_NODE_ENABLED);
2360
2361         if (tm_node->flags == flags)
2362                 return 0;
2363
2364         /* send mbox for state change */
2365         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2366
2367         req->lvl = tm_node->hw_lvl;
2368         req->num_regs = prepare_tm_sw_xoff(tm_node, suspend,
2369                                            req->reg, req->regval);
2370         rc = send_tm_reqval(mbox, req, error);
2371         if (!rc)
2372                 tm_node->flags = flags;
2373         return rc;
2374 }
2375
2376 static int
2377 otx2_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id,
2378                          struct rte_tm_error *error)
2379 {
2380         return nix_tm_node_suspend_resume(eth_dev, node_id, error, true);
2381 }
2382
2383 static int
2384 otx2_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
2385                         struct rte_tm_error *error)
2386 {
2387         return nix_tm_node_suspend_resume(eth_dev, node_id, error, false);
2388 }
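/*
 * Illustrative usage sketch (not driver code): applications reach the two
 * wrappers above through the generic rte_tm API, e.g.:
 *
 *     struct rte_tm_error err;
 *
 *     rte_tm_node_suspend(port_id, node_id, &err);  // sets SW_XOFF
 *     rte_tm_node_resume(port_id, node_id, &err);   // clears SW_XOFF
 *
 * Both calls are valid only after rte_tm_hierarchy_commit() has run, per
 * the NIX_TM_COMMITTED check in nix_tm_node_suspend_resume().
 */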
2389
2390 static int
2391 otx2_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
2392                              int clear_on_fail,
2393                              struct rte_tm_error *error)
2394 {
2395         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2396         struct otx2_nix_tm_node *tm_node;
2397         uint32_t leaf_cnt = 0;
2398         int rc;
2399
2400         if (dev->tm_flags & NIX_TM_COMMITTED) {
2401                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2402                 error->message = "hierarchy exists";
2403                 return -EINVAL;
2404         }
2405
2406         /* Check if we have all the leaf nodes */
2407         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
2408                 if (tm_node->flags & NIX_TM_NODE_USER &&
2409                     tm_node->id < dev->tm_leaf_cnt)
2410                         leaf_cnt++;
2411         }
2412
2413         if (leaf_cnt != dev->tm_leaf_cnt) {
2414                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2415                 error->message = "incomplete hierarchy";
2416                 return -EINVAL;
2417         }
2418
2419         /*
2420          * Disable xmit; it will be re-enabled once the
2421          * new topology is in place.
2422          */
2423         rc = nix_xmit_disable(eth_dev);
2424         if (rc) {
2425                 otx2_err("failed to disable TX, rc=%d", rc);
2426                 return -EIO;
2427         }
2428
2429         /* Delete default/ratelimit tree */
2430         if (dev->tm_flags & (NIX_TM_DEFAULT_TREE | NIX_TM_RATE_LIMIT_TREE)) {
2431                 rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false);
2432                 if (rc) {
2433                         error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2434                         error->message = "failed to free default resources";
2435                         return rc;
2436                 }
2437                 dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE |
2438                                    NIX_TM_RATE_LIMIT_TREE);
2439         }
2440
2441         /* Free up user-allocated resources */
2442         rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER,
2443                                    NIX_TM_NODE_USER, true);
2444         if (rc) {
2445                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2446                 error->message = "failed to free user resources";
2447                 return rc;
2448         }
2449
2450         rc = nix_tm_alloc_resources(eth_dev, true);
2451         if (rc) {
2452                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2453                 error->message = "alloc resources failed";
2454                 /* TODO should we restore default config ? */
2455                 if (clear_on_fail)
2456                         nix_tm_free_resources(dev, 0, 0, false);
2457                 return rc;
2458         }
2459
2460         error->type = RTE_TM_ERROR_TYPE_NONE;
2461         dev->tm_flags |= NIX_TM_COMMITTED;
2462         return 0;
2463 }
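/*
 * Illustrative usage sketch (not driver code) of the application flow
 * that ends in the commit above. Node id 100, weight 1 and the level are
 * example values; OTX2_TM_LVL_ROOT is this driver's level 0, and every
 * Tx queue id must appear as a leaf node before the commit:
 *
 *     struct rte_tm_error err;
 *     struct rte_tm_node_params np;
 *
 *     memset(&np, 0, sizeof(np));
 *     np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
 *     // root first, then one node per level down to the leaves
 *     rte_tm_node_add(port_id, 100, RTE_TM_NODE_ID_NULL, 0, 1,
 *                     OTX2_TM_LVL_ROOT, &np, &err);
 *     // ... intermediate levels and one leaf per Tx queue id ...
 *     rte_tm_hierarchy_commit(port_id, 1, &err);  // 1 = clear_on_fail
 */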
2464
2465 static int
2466 otx2_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev,
2467                                uint32_t node_id,
2468                                uint32_t profile_id,
2469                                struct rte_tm_error *error)
2470 {
2471         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2472         struct otx2_nix_tm_shaper_profile *profile = NULL;
2473         struct otx2_mbox *mbox = dev->mbox;
2474         struct otx2_nix_tm_node *tm_node;
2475         struct nix_txschq_config *req;
2476         uint8_t k;
2477         int rc;
2478
2479         tm_node = nix_tm_node_search(dev, node_id, true);
2480         if (!tm_node || nix_tm_is_leaf(dev, tm_node->lvl)) {
2481                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2482                 error->message = "invalid node";
2483                 return -EINVAL;
2484         }
2485
2486         if (profile_id == tm_node->params.shaper_profile_id)
2487                 return 0;
2488
2489         if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
2490                 profile = nix_tm_shaper_profile_search(dev, profile_id);
2491                 if (!profile) {
2492                         error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2493                         error->message = "shaper profile ID does not exist";
2494                         return -EINVAL;
2495                 }
2496         }
2497
2498         tm_node->params.shaper_profile_id = profile_id;
2499
2500         /* Nothing to do if not yet committed */
2501         if (!(dev->tm_flags & NIX_TM_COMMITTED))
2502                 return 0;
2503
2504         tm_node->flags &= ~NIX_TM_NODE_ENABLED;
2505
2506         /* Flush the specific node with SW_XOFF */
2507         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2508         req->lvl = tm_node->hw_lvl;
2509         k = prepare_tm_sw_xoff(tm_node, true, req->reg, req->regval);
2510         req->num_regs = k;
2511
2512         rc = send_tm_reqval(mbox, req, error);
2513         if (rc)
2514                 return rc;
2515
2516         shaper_default_red_algo(dev, tm_node, profile);
2517
2518         /* Update the PIR/CIR and clear SW XOFF */
2519         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2520         req->lvl = tm_node->hw_lvl;
2521
2522         k = prepare_tm_shaper_reg(tm_node, profile, req->reg, req->regval);
2523
2524         k += prepare_tm_sw_xoff(tm_node, false, &req->reg[k], &req->regval[k]);
2525
2526         req->num_regs = k;
2527         rc = send_tm_reqval(mbox, req, error);
2528         if (!rc)
2529                 tm_node->flags |= NIX_TM_NODE_ENABLED;
2530         return rc;
2531 }
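/*
 * Note on the sequence above: the node is put under SW_XOFF first so that
 * the RED algorithm selection can be refreshed via
 * shaper_default_red_algo() and the new PIR/CIR programmed while no
 * packets are in flight; SW_XOFF is then cleared in the same mbox message
 * that writes the new shaper registers. Illustrative application-side
 * call (generic rte_tm API):
 *
 *     struct rte_tm_error err;
 *
 *     rte_tm_node_shaper_update(port_id, node_id, new_profile_id, &err);
 */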
2532
2533 static int
2534 otx2_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev,
2535                                uint32_t node_id, uint32_t new_parent_id,
2536                                uint32_t priority, uint32_t weight,
2537                                struct rte_tm_error *error)
2538 {
2539         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2540         struct otx2_nix_tm_node *tm_node, *sibling;
2541         struct otx2_nix_tm_node *new_parent;
2542         struct nix_txschq_config *req;
2543         uint8_t k;
2544         int rc;
2545
2546         if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
2547                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2548                 error->message = "hierarchy doesn't exist";
2549                 return -EINVAL;
2550         }
2551
2552         tm_node = nix_tm_node_search(dev, node_id, true);
2553         if (!tm_node) {
2554                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2555                 error->message = "no such node";
2556                 return -EINVAL;
2557         }
2558
2559         /* Parent id is valid only for non-root nodes */
2560         if (tm_node->hw_lvl != dev->otx2_tm_root_lvl) {
2561                 new_parent = nix_tm_node_search(dev, new_parent_id, true);
2562                 if (!new_parent) {
2563                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2564                         error->message = "no such parent node";
2565                         return -EINVAL;
2566                 }
2567
2568                 /* Current support is only for dynamic weight update */
2569                 if (tm_node->parent != new_parent ||
2570                     tm_node->priority != priority) {
2571                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2572                         error->message = "only weight update supported";
2573                         return -EINVAL;
2574                 }
2575         }
2576
2577         /* Skip if no change */
2578         if (tm_node->weight == weight)
2579                 return 0;
2580
2581         tm_node->weight = weight;
2582
2583         /* For leaf nodes, SQ CTX needs update */
2584         if (nix_tm_is_leaf(dev, tm_node->lvl)) {
2585                 /* Update SQ quantum data on the fly */
2586                 rc = nix_sq_sched_data(dev, tm_node, true);
2587                 if (rc) {
2588                         error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2589                         error->message = "sq sched data update failed";
2590                         return rc;
2591                 }
2592         } else {
2593                 /* XOFF Parent node */
2594                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2595                 req->lvl = tm_node->parent->hw_lvl;
2596                 req->num_regs = prepare_tm_sw_xoff(tm_node->parent, true,
2597                                                    req->reg, req->regval);
2598                 rc = send_tm_reqval(dev->mbox, req, error);
2599                 if (rc)
2600                         return rc;
2601
2602                 /* XOFF this node and all other siblings */
2603                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2604                 req->lvl = tm_node->hw_lvl;
2605
2606                 k = 0;
2607                 TAILQ_FOREACH(sibling, &dev->node_list, node) {
2608                         if (sibling->parent != tm_node->parent)
2609                                 continue;
2610                         k += prepare_tm_sw_xoff(sibling, true, &req->reg[k],
2611                                                 &req->regval[k]);
2612                 }
2613                 req->num_regs = k;
2614                 rc = send_tm_reqval(dev->mbox, req, error);
2615                 if (rc)
2616                         return rc;
2617
2618                 /* Update new weight for current node */
2619                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2620                 req->lvl = tm_node->hw_lvl;
2621                 req->num_regs = prepare_tm_sched_reg(dev, tm_node,
2622                                                      req->reg, req->regval);
2623                 rc = send_tm_reqval(dev->mbox, req, error);
2624                 if (rc)
2625                         return rc;
2626
2627                 /* XON this node and all other siblings */
2628                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2629                 req->lvl = tm_node->hw_lvl;
2630
2631                 k = 0;
2632                 TAILQ_FOREACH(sibling, &dev->node_list, node) {
2633                         if (sibling->parent != tm_node->parent)
2634                                 continue;
2635                         k += prepare_tm_sw_xoff(sibling, false, &req->reg[k],
2636                                                 &req->regval[k]);
2637                 }
2638                 req->num_regs = k;
2639                 rc = send_tm_reqval(dev->mbox, req, error);
2640                 if (rc)
2641                         return rc;
2642
2643                 /* XON Parent node */
2644                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2645                 req->lvl = tm_node->parent->hw_lvl;
2646                 req->num_regs = prepare_tm_sw_xoff(tm_node->parent, false,
2647                                                    req->reg, req->regval);
2648                 rc = send_tm_reqval(dev->mbox, req, error);
2649                 if (rc)
2650                         return rc;
2651         }
2652         return 0;
2653 }
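/*
 * Summary of the non-leaf weight update above (a restatement of the code,
 * no extra behavior): XOFF the parent, XOFF the whole sibling group,
 * program the new DWRR weight via prepare_tm_sched_reg(), then XON the
 * siblings and parent in reverse order, so traffic only resumes once the
 * new weight is in place.
 */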
2654
2655 static int
2656 otx2_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
2657                             struct rte_tm_node_stats *stats,
2658                             uint64_t *stats_mask, int clear,
2659                             struct rte_tm_error *error)
2660 {
2661         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2662         struct otx2_nix_tm_node *tm_node;
2663         uint64_t reg, val;
2664         int64_t *addr;
2665         int rc = 0;
2666
2667         tm_node = nix_tm_node_search(dev, node_id, true);
2668         if (!tm_node) {
2669                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2670                 error->message = "no such node";
2671                 return -EINVAL;
2672         }
2673
2674         /* Stats are supported only for leaf nodes and the TL1 root */
2675         if (nix_tm_is_leaf(dev, tm_node->lvl)) {
2676                 reg = (((uint64_t)tm_node->id) << 32);
2677
2678                 /* Packets */
2679                 addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);
2680                 val = otx2_atomic64_add_nosync(reg, addr);
2681                 if (val & OP_ERR)
2682                         val = 0;
2683                 stats->n_pkts = val - tm_node->last_pkts;
2684
2685                 /* Bytes */
2686                 addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_OCTS);
2687                 val = otx2_atomic64_add_nosync(reg, addr);
2688                 if (val & OP_ERR)
2689                         val = 0;
2690                 stats->n_bytes = val - tm_node->last_bytes;
2691
2692                 if (clear) {
2693                         tm_node->last_pkts = stats->n_pkts;
2694                         tm_node->last_bytes = stats->n_bytes;
2695                 }
2696
2697                 *stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
2698
2699         } else if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1) {
2700                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2701                 error->message = "stats read error";
2702
2703                 /* RED Drop packets */
2704                 reg = NIX_AF_TL1X_DROPPED_PACKETS(tm_node->hw_id);
2705                 rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
2706                 if (rc)
2707                         goto exit;
2708                 stats->leaf.n_pkts_dropped[RTE_COLOR_RED] =
2709                                                 val - tm_node->last_pkts;
2710
2711                 /* RED Drop bytes */
2712                 reg = NIX_AF_TL1X_DROPPED_BYTES(tm_node->hw_id);
2713                 rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
2714                 if (rc)
2715                         goto exit;
2716                 stats->leaf.n_bytes_dropped[RTE_COLOR_RED] =
2717                                                 val - tm_node->last_bytes;
2718
2719                 /* Clear stats */
2720                 if (clear) {
2721                         tm_node->last_pkts =
2722                                 stats->leaf.n_pkts_dropped[RTE_COLOR_RED];
2723                         tm_node->last_bytes =
2724                                 stats->leaf.n_bytes_dropped[RTE_COLOR_RED];
2725                 }
2726
2727                 *stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
2728                         RTE_TM_STATS_N_BYTES_RED_DROPPED;
2729
2730         } else {
2731                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2732                 error->message = "unsupported node";
2733                 rc = -EINVAL;
2734         }
2735
2736 exit:
2737         return rc;
2738 }
2739
2740 const struct rte_tm_ops otx2_tm_ops = {
2741         .node_type_get = otx2_nix_tm_node_type_get,
2742
2743         .capabilities_get = otx2_nix_tm_capa_get,
2744         .level_capabilities_get = otx2_nix_tm_level_capa_get,
2745         .node_capabilities_get = otx2_nix_tm_node_capa_get,
2746
2747         .shaper_profile_add = otx2_nix_tm_shaper_profile_add,
2748         .shaper_profile_delete = otx2_nix_tm_shaper_profile_delete,
2749
2750         .node_add = otx2_nix_tm_node_add,
2751         .node_delete = otx2_nix_tm_node_delete,
2752         .node_suspend = otx2_nix_tm_node_suspend,
2753         .node_resume = otx2_nix_tm_node_resume,
2754         .hierarchy_commit = otx2_nix_tm_hierarchy_commit,
2755
2756         .node_shaper_update = otx2_nix_tm_node_shaper_update,
2757         .node_parent_update = otx2_nix_tm_node_parent_update,
2758         .node_stats_read = otx2_nix_tm_node_stats_read,
2759 };
2760
2761 static int
2762 nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)
2763 {
2764         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2765         uint32_t def = eth_dev->data->nb_tx_queues;
2766         struct rte_tm_node_params params;
2767         uint32_t leaf_parent, i;
2768         int rc = 0, leaf_level;
2769
2770         /* Default params */
2771         memset(&params, 0, sizeof(params));
2772         params.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
2773
2774         if (nix_tm_have_tl1_access(dev)) {
2775                 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
2776                 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2777                                              DEFAULT_RR_WEIGHT,
2778                                              NIX_TXSCH_LVL_TL1,
2779                                              OTX2_TM_LVL_ROOT, false, &params);
2780                 if (rc)
2781                         goto exit;
2782                 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2783                                              DEFAULT_RR_WEIGHT,
2784                                              NIX_TXSCH_LVL_TL2,
2785                                              OTX2_TM_LVL_SCH1, false, &params);
2786                 if (rc)
2787                         goto exit;
2788
2789                 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2790                                              DEFAULT_RR_WEIGHT,
2791                                              NIX_TXSCH_LVL_TL3,
2792                                              OTX2_TM_LVL_SCH2, false, &params);
2793                 if (rc)
2794                         goto exit;
2795
2796                 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2797                                              DEFAULT_RR_WEIGHT,
2798                                              NIX_TXSCH_LVL_TL4,
2799                                              OTX2_TM_LVL_SCH3, false, &params);
2800                 if (rc)
2801                         goto exit;
2802
2803                 rc = nix_tm_node_add_to_list(dev, def + 4, def + 3, 0,
2804                                              DEFAULT_RR_WEIGHT,
2805                                              NIX_TXSCH_LVL_SMQ,
2806                                              OTX2_TM_LVL_SCH4, false, &params);
2807                 if (rc)
2808                         goto exit;
2809
2810                 leaf_parent = def + 4;
2811                 leaf_level = OTX2_TM_LVL_QUEUE;
2812         } else {
2813                 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
2814                 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2815                                              DEFAULT_RR_WEIGHT,
2816                                              NIX_TXSCH_LVL_TL2,
2817                                              OTX2_TM_LVL_ROOT, false, &params);
2818                 if (rc)
2819                         goto exit;
2820
2821                 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2822                                              DEFAULT_RR_WEIGHT,
2823                                              NIX_TXSCH_LVL_TL3,
2824                                              OTX2_TM_LVL_SCH1, false, &params);
2825                 if (rc)
2826                         goto exit;
2827
2828                 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2829                                              DEFAULT_RR_WEIGHT,
2830                                              NIX_TXSCH_LVL_TL4,
2831                                              OTX2_TM_LVL_SCH2, false, &params);
2832                 if (rc)
2833                         goto exit;
2834
2835                 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2836                                              DEFAULT_RR_WEIGHT,
2837                                              NIX_TXSCH_LVL_SMQ,
2838                                              OTX2_TM_LVL_SCH3, false, &params);
2839                 if (rc)
2840                         goto exit;
2841
2842                 leaf_parent = def + 3;
2843                 leaf_level = OTX2_TM_LVL_SCH4;
2844         }
2845
2846         /* Add leaf nodes */
2847         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2848                 rc = nix_tm_node_add_to_list(dev, i, leaf_parent, 0,
2849                                              DEFAULT_RR_WEIGHT,
2850                                              NIX_TXSCH_LVL_CNT,
2851                                              leaf_level, false, &params);
2852                 if (rc)
2853                         break;
2854         }
2855
2856 exit:
2857         return rc;
2858 }
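/*
 * Shape of the default tree built above, for a PF with TL1 access and
 * N Tx queues (ids are the internal node ids used in the code):
 *
 *   TL1 (id def) -> TL2 (def+1) -> TL3 (def+2) -> TL4 (def+3)
 *       -> SMQ (def+4) -> leaves 0..N-1 (one per SQ)
 *
 * Without TL1 access the chain starts at TL2 (id def) and the single SMQ
 * is def+3. All nodes use DEFAULT_RR_WEIGHT and no shaper profile.
 */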
2859
2860 void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev)
2861 {
2862         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2863
2864         TAILQ_INIT(&dev->node_list);
2865         TAILQ_INIT(&dev->shaper_profile_list);
2866         dev->tm_rate_min = 1E9; /* 1Gbps */
2867 }
2868
2869 int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
2870 {
2871         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2872         struct otx2_eth_dev  *dev = otx2_eth_pmd_priv(eth_dev);
2873         uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
2874         int rc;
2875
2876         /* Free up all resources already held */
2877         rc = nix_tm_free_resources(dev, 0, 0, false);
2878         if (rc) {
2879                 otx2_err("Failed to free up existing resources, rc=%d", rc);
2880                 return rc;
2881         }
2882
2883         /* Clear shaper profiles */
2884         nix_tm_clear_shaper_profiles(dev);
2885         dev->tm_flags = NIX_TM_DEFAULT_TREE;
2886
2887         /* Disable TL1 static priority when VFs are enabled,
2888          * as otherwise VF TL2 nodes would need runtime
2889          * reallocation to support a specific PF topology.
2890          */
2891         if (pci_dev->max_vfs)
2892                 dev->tm_flags |= NIX_TM_TL1_NO_SP;
2893
2894         rc = nix_tm_prepare_default_tree(eth_dev);
2895         if (rc != 0)
2896                 return rc;
2897
2898         rc = nix_tm_alloc_resources(eth_dev, false);
2899         if (rc != 0)
2900                 return rc;
2901         dev->tm_leaf_cnt = sq_cnt;
2902
2903         return 0;
2904 }
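/*
 * Usage sketch (an assumption about the call path; the caller is not part
 * of this excerpt): the device configure/start path would typically run
 *
 *	rc = otx2_nix_tm_init_default(eth_dev);
 *	if (rc)
 *		otx2_err("TM default init failed, rc=%d", rc);
 *
 * after the Tx queues are set up, so that nb_tx_queues reflects the final
 * queue count when the leaf nodes are created.
 */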
2905
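/*
 * The rate-limited tree below differs from the default tree in one key
 * way: each Tx queue gets its own SMQ/MDQ parent rather than all queues
 * sharing a single SMQ, so a per-queue shaper can later be attached at
 * the MDQ level (see otx2_nix_tm_rate_limit_mdq() below).
 */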
2906 static int
2907 nix_tm_prepare_rate_limited_tree(struct rte_eth_dev *eth_dev)
2908 {
2909         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2910         uint32_t def = eth_dev->data->nb_tx_queues;
2911         struct rte_tm_node_params params;
2912         uint32_t leaf_parent, i; int rc = 0;
2913
2914         memset(&params, 0, sizeof(params));
2915
2916         if (nix_tm_have_tl1_access(dev)) {
2917                 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
2918                 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2919                                         DEFAULT_RR_WEIGHT,
2920                                         NIX_TXSCH_LVL_TL1,
2921                                         OTX2_TM_LVL_ROOT, false, &params);
2922                 if (rc)
2923                         goto error;
2924                 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2925                                         DEFAULT_RR_WEIGHT,
2926                                         NIX_TXSCH_LVL_TL2,
2927                                         OTX2_TM_LVL_SCH1, false, &params);
2928                 if (rc)
2929                         goto error;
2930                 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2931                                         DEFAULT_RR_WEIGHT,
2932                                         NIX_TXSCH_LVL_TL3,
2933                                         OTX2_TM_LVL_SCH2, false, &params);
2934                 if (rc)
2935                         goto error;
2936                 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2937                                         DEFAULT_RR_WEIGHT,
2938                                         NIX_TXSCH_LVL_TL4,
2939                                         OTX2_TM_LVL_SCH3, false, &params);
2940                 if (rc)
2941                         goto error;
2942                 leaf_parent = def + 3;
2943
2944                 /* Add per queue SMQ nodes */
2945                 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2946                         rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
2947                                                 leaf_parent,
2948                                                 0, DEFAULT_RR_WEIGHT,
2949                                                 NIX_TXSCH_LVL_SMQ,
2950                                                 OTX2_TM_LVL_SCH4,
2951                                                 false, &params);
2952                         if (rc)
2953                                 goto error;
2954                 }
2955
2956                 /* Add leaf nodes */
2957                 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2958                         rc = nix_tm_node_add_to_list(dev, i,
2959                                                      leaf_parent + 1 + i, 0,
2960                                                      DEFAULT_RR_WEIGHT,
2961                                                      NIX_TXSCH_LVL_CNT,
2962                                                      OTX2_TM_LVL_QUEUE,
2963                                                      false, &params);
2964                         if (rc)
2965                                 goto error;
2966                 }
2967
2968                 return 0;
2969         }
2970
2971         dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
2972         rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2973                                 DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL2,
2974                                 OTX2_TM_LVL_ROOT, false, &params);
2975         if (rc)
2976                 goto error;
2977         rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2978                                 DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL3,
2979                                 OTX2_TM_LVL_SCH1, false, &params);
2980         if (rc)
2981                 goto error;
2982         rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2983                                      DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL4,
2984                                      OTX2_TM_LVL_SCH2, false, &params);
2985         if (rc)
2986                 goto error;
2987         leaf_parent = def + 2;
2988
2989         /* Add per queue SMQ nodes */
2990         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2991                 rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
2992                                              leaf_parent,
2993                                              0, DEFAULT_RR_WEIGHT,
2994                                              NIX_TXSCH_LVL_SMQ,
2995                                              OTX2_TM_LVL_SCH3,
2996                                              false, &params);
2997                 if (rc)
2998                         goto error;
2999         }
3000
3001         /* Add leaf nodes */
3002         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
3003                 rc = nix_tm_node_add_to_list(dev, i, leaf_parent + 1 + i, 0,
3004                                              DEFAULT_RR_WEIGHT,
3005                                              NIX_TXSCH_LVL_CNT,
3006                                              OTX2_TM_LVL_SCH4,
3007                                              false, &params);
3008                 if (rc)
3009                         break;
3010         }
3011 error:
3012         return rc;
3013 }
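/*
 * Worked node-id example (illustrative only): with nb_tx_queues = 4 and
 * no TL1 access, def = 4, giving TL2 = id 4, TL3 = id 5 and TL4 = id 6
 * (leaf_parent); the per-queue SMQs take ids 7..10 (leaf_parent + 1 + i),
 * and the SQ leaves keep ids 0..3, each attached to its own SMQ.
 */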
3014
3015 static int
3016 otx2_nix_tm_rate_limit_mdq(struct rte_eth_dev *eth_dev,
3017                            struct otx2_nix_tm_node *tm_node,
3018                            uint64_t tx_rate)
3019 {
3020         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3021         struct otx2_nix_tm_shaper_profile profile;
3022         struct otx2_mbox *mbox = dev->mbox;
3023         volatile uint64_t *reg, *regval;
3024         struct nix_txschq_config *req;
3025         uint16_t flags;
3026         uint8_t k = 0;
3027         int rc;
3028
3029         flags = tm_node->flags;
3030
3031         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
3032         req->lvl = NIX_TXSCH_LVL_MDQ;
3033         reg = req->reg;
3034         regval = req->regval;
3035
3036         if (tx_rate == 0) {
3037                 k += prepare_tm_sw_xoff(tm_node, true, &reg[k], &regval[k]);
3038                 flags &= ~NIX_TM_NODE_ENABLED;
3039                 goto exit;
3040         }
3041
3042         if (!(flags & NIX_TM_NODE_ENABLED)) {
3043                 k += prepare_tm_sw_xoff(tm_node, false, &reg[k], &regval[k]);
3044                 flags |= NIX_TM_NODE_ENABLED;
3045         }
3046
3047         /* Use only PIR (peak rate) for the rate limit; CIR is left at 0 */
3048         memset(&profile, 0, sizeof(profile));
3049         profile.params.peak.rate = tx_rate;
3050         /* Minimum burst: the bytes sent in ~4us at the given rate */
3051         profile.params.peak.size = RTE_MAX(NIX_MAX_HW_FRS,
3052                                            (4ull * tx_rate) / (1E6 * 8));
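	/*
	 * Worked example (illustrative): at tx_rate = 1 Gbps the 4us
	 * term is 4 * 1e9 / 8e6 = 500 bytes, so NIX_MAX_HW_FRS wins;
	 * assuming NIX_MAX_HW_FRS is ~9KB, the rate-derived term only
	 * dominates above roughly 18 Gbps.
	 */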
3053         if (!dev->tm_rate_min || dev->tm_rate_min > tx_rate)
3054                 dev->tm_rate_min = tx_rate;
3055
3056         k += prepare_tm_shaper_reg(tm_node, &profile, &reg[k], &regval[k]);
3057 exit:
3058         req->num_regs = k;
3059         rc = otx2_mbox_process(mbox);
3060         if (rc)
3061                 return rc;
3062
3063         tm_node->flags = flags;
3064         return 0;
3065 }
3066
3067 int
3068 otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
3069                                 uint16_t queue_idx, uint16_t tx_rate_mbps)
3070 {
3071         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3072         uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
3073         struct otx2_nix_tm_node *tm_node;
3074         int rc;
3075
3076         /* Check for supported revisions */
3077         if (otx2_dev_is_95xx_Ax(dev) ||
3078             otx2_dev_is_96xx_Ax(dev))
3079                 return -EINVAL;
3080
3081         if (queue_idx >= eth_dev->data->nb_tx_queues)
3082                 return -EINVAL;
3083
3084         if (!(dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
3085             !(dev->tm_flags & NIX_TM_RATE_LIMIT_TREE))
3086                 goto error;
3087
3088         if ((dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
3089             eth_dev->data->nb_tx_queues > 1) {
3090                 /* For a TM topology change, the ethdev must be stopped */
3091                 if (eth_dev->data->dev_started)
3092                         return -EBUSY;
3093
3094                 /*
3095                  * Disable transmit; it is re-enabled once the
3096                  * new topology is in place.
3097                  */
3098                 rc = nix_xmit_disable(eth_dev);
3099                 if (rc) {
3100                         otx2_err("failed to disable TX, rc=%d", rc);
3101                         return -EIO;
3102                 }
3103
3104                 rc = nix_tm_free_resources(dev, 0, 0, false);
3105                 if (rc < 0) {
3106                         otx2_tm_dbg("failed to free default resources, rc %d",
3107                                    rc);
3108                         return -EIO;
3109                 }
3110
3111                 rc = nix_tm_prepare_rate_limited_tree(eth_dev);
3112                 if (rc < 0) {
3113                         otx2_tm_dbg("failed to prepare tm tree, rc=%d", rc);
3114                         return rc;
3115                 }
3116
3117                 rc = nix_tm_alloc_resources(eth_dev, true);
3118                 if (rc != 0) {
3119                         otx2_tm_dbg("failed to allocate tm tree, rc=%d", rc);
3120                         return rc;
3121                 }
3122
3123                 dev->tm_flags &= ~NIX_TM_DEFAULT_TREE;
3124                 dev->tm_flags |= NIX_TM_RATE_LIMIT_TREE;
3125         }
3126
3127         tm_node = nix_tm_node_search(dev, queue_idx, false);
3128
3129         /* Check if we found a valid leaf node */
3130         if (!tm_node ||
3131             !nix_tm_is_leaf(dev, tm_node->lvl) ||
3132             !tm_node->parent ||
3133             tm_node->parent->hw_id == UINT32_MAX)
3134                 return -EIO;
3135
3136         return otx2_nix_tm_rate_limit_mdq(eth_dev, tm_node->parent, tx_rate);
3137 error:
3138         otx2_tm_dbg("Unsupported TM tree 0x%x", dev->tm_flags);
3139         return -EINVAL;
3140 }
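/*
 * Usage sketch (an assumption about the op wiring; the registration is
 * not part of this excerpt): if this handler backs the generic ethdev
 * rate-limit op, an application would reach it with
 *
 *	rte_eth_set_queue_rate_limit(port_id, queue_idx, 100);
 *
 * to cap a queue at 100 Mbps, arriving here as tx_rate_mbps = 100.
 */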
3141
3142 int
3143 otx2_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *arg)
3144 {
3145         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3146
3147         if (!arg)
3148                 return -EINVAL;
3149
3150         /* Check for supported revisions */
3151         if (otx2_dev_is_95xx_Ax(dev) ||
3152             otx2_dev_is_96xx_Ax(dev))
3153                 return -EINVAL;
3154
3155         *(const void **)arg = &otx2_tm_ops;
3156
3157         return 0;
3158 }
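/*
 * Note: applications do not call the TM ops directly. The generic rte_tm
 * layer retrieves them through this callback and dispatches
 * rte_tm_node_add(), rte_tm_hierarchy_commit(), etc. through the
 * returned otx2_tm_ops table.
 */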
3159
3160 int
3161 otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
3162 {
3163         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3164         int rc;
3165
3166         /* Xmit is assumed to be disabled by the caller */
3167         /* Free up resources already held */
3168         rc = nix_tm_free_resources(dev, 0, 0, false);
3169         if (rc) {
3170                 otx2_err("Failed to free up existing resources, rc=%d", rc);
3171                 return rc;
3172         }
3173
3174         /* Clear shaper profiles */
3175         nix_tm_clear_shaper_profiles(dev);
3176
3177         dev->tm_flags = 0;
3178         return 0;
3179 }
3180
3181 int
3182 otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
3183                           uint32_t *rr_quantum, uint16_t *smq)
3184 {
3185         struct otx2_nix_tm_node *tm_node;
3186         int rc;
3187
3188         /* 0..sq_cnt-1 are leaf nodes */
3189         if (sq >= dev->tm_leaf_cnt)
3190                 return -EINVAL;
3191
3192         /* Search for internal node first */
3193         tm_node = nix_tm_node_search(dev, sq, false);
3194         if (!tm_node)
3195                 tm_node = nix_tm_node_search(dev, sq, true);
3196
3197         /* Check if we found a valid leaf node */
3198         if (!tm_node || !nix_tm_is_leaf(dev, tm_node->lvl) ||
3199             !tm_node->parent || tm_node->parent->hw_id == UINT32_MAX) {
3200                 return -EIO;
3201         }
3202
3203         /* Get SMQ Id of leaf node's parent */
3204         *smq = tm_node->parent->hw_id;
3205         *rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
3206
3207         rc = nix_smq_xoff(dev, tm_node->parent, false);
3208         if (rc)
3209                 return rc;
3210         tm_node->flags |= NIX_TM_NODE_ENABLED;
3211
3212         return 0;
3213 }
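/*
 * Caller sketch (an assumption; the SQ init path is not part of this
 * excerpt): when an SQ context is programmed, the driver would fetch its
 * scheduler attachment roughly as
 *
 *	uint32_t rr_quantum;
 *	uint16_t smq;
 *
 *	rc = otx2_nix_tm_get_leaf_data(dev, sq, &rr_quantum, &smq);
 *
 * and write "smq" and "rr_quantum" into the SQ hardware context.
 */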