net/octeontx2: fix minimum length to SMQ config
drivers/net/octeontx2/otx2_tm.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_malloc.h>

#include "otx2_ethdev.h"
#include "otx2_tm.h"

/* Use last LVL_CNT nodes as default nodes */
#define NIX_DEFAULT_NODE_ID_START (RTE_TM_NODE_ID_NULL - NIX_TXSCH_LVL_CNT)

enum otx2_tm_node_level {
	OTX2_TM_LVL_ROOT = 0,
	OTX2_TM_LVL_SCH1,
	OTX2_TM_LVL_SCH2,
	OTX2_TM_LVL_SCH3,
	OTX2_TM_LVL_SCH4,
	OTX2_TM_LVL_QUEUE,
	OTX2_TM_LVL_MAX,
};

static inline
uint64_t shaper2regval(struct shaper_params *shaper)
{
	return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
		(shaper->div_exp << 13) | (shaper->exponent << 9) |
		(shaper->mantissa << 1);
}

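/*
 * A sketch of the *IR register layout implied by the shifts in
 * shaper2regval() (an assumption, not quoted from the HRM; field widths
 * follow from MAX_RATE_MANTISSA and friends being 8/4-bit maxima):
 *
 *   bit  [0]     enable (callers OR in the low bit)
 *   bits [8:1]   rate mantissa
 *   bits [12:9]  rate exponent
 *   bits [16:13] rate divider exponent
 *   bits [36:29] burst mantissa
 *   bits [40:37] burst exponent
 */
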
int
otx2_nix_get_link(struct otx2_eth_dev *dev)
{
	int link = 13 /* SDP */;
	uint16_t lmac_chan;
	uint16_t map;

	lmac_chan = dev->tx_chan_base;

	/* CGX lmac link */
	if (lmac_chan >= 0x800) {
		map = lmac_chan & 0x7FF;
		link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
	} else if (lmac_chan < 0x700) {
		/* LBK channel */
		link = 12;
	}

	return link;
}

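/*
 * Worked example for the mapping above: tx_chan_base = 0x810 is a CGX
 * channel, so map = 0x010 and link = 4 * 0x0 + 0x1 = 1; a base below
 * 0x700 selects the LBK link (12); anything in between keeps the SDP
 * default (13).
 */
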
static uint8_t
nix_get_relchan(struct otx2_eth_dev *dev)
{
	return dev->tx_chan_base & 0xff;
}

static bool
nix_tm_have_tl1_access(struct otx2_eth_dev *dev)
{
	bool is_lbk = otx2_dev_is_lbk(dev);

	return otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) && !is_lbk;
}

static bool
nix_tm_is_leaf(struct otx2_eth_dev *dev, int lvl)
{
	if (nix_tm_have_tl1_access(dev))
		return (lvl == OTX2_TM_LVL_QUEUE);

	return (lvl == OTX2_TM_LVL_SCH4);
}

static int
find_prio_anchor(struct otx2_eth_dev *dev, uint32_t node_id)
{
	struct otx2_nix_tm_node *child_node;

	TAILQ_FOREACH(child_node, &dev->node_list, node) {
		if (!child_node->parent)
			continue;
		if (child_node->parent->id != node_id)
			continue;
		if (child_node->priority == child_node->parent->rr_prio)
			continue;
		return child_node->hw_id - child_node->priority;
	}
	return 0;
}

static struct otx2_nix_tm_shaper_profile *
nix_tm_shaper_profile_search(struct otx2_eth_dev *dev, uint32_t shaper_id)
{
	struct otx2_nix_tm_shaper_profile *tm_shaper_profile;

	TAILQ_FOREACH(tm_shaper_profile, &dev->shaper_profile_list, shaper) {
		if (tm_shaper_profile->shaper_profile_id == shaper_id)
			return tm_shaper_profile;
	}
	return NULL;
}

static inline uint64_t
shaper_rate_to_nix(uint64_t value, uint64_t *exponent_p,
		   uint64_t *mantissa_p, uint64_t *div_exp_p)
{
	uint64_t div_exp, exponent, mantissa;

	/* Boundary checks */
	if (value < MIN_SHAPER_RATE ||
	    value > MAX_SHAPER_RATE)
		return 0;

	if (value <= SHAPER_RATE(0, 0, 0)) {
		/* Calculate rate div_exp and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * (256 + mantissa)
		 *		/ ((1 << div_exp) * 256))
		 */
		div_exp = 0;
		exponent = 0;
		mantissa = MAX_RATE_MANTISSA;

		while (value < (NIX_SHAPER_RATE_CONST / (1 << div_exp)))
			div_exp += 1;

		while (value <
		       ((NIX_SHAPER_RATE_CONST * (256 + mantissa)) /
			((1 << div_exp) * 256)))
			mantissa -= 1;
	} else {
		/* Calculate rate exponent and mantissa using
		 * the following formula:
		 *
		 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
		 */
		div_exp = 0;
		exponent = MAX_RATE_EXPONENT;
		mantissa = MAX_RATE_MANTISSA;

		while (value < (NIX_SHAPER_RATE_CONST * (1 << exponent)))
			exponent -= 1;

		while (value < ((NIX_SHAPER_RATE_CONST *
				((256 + mantissa) << exponent)) / 256))
			mantissa -= 1;
	}

	if (div_exp > MAX_RATE_DIV_EXP ||
	    exponent > MAX_RATE_EXPONENT || mantissa > MAX_RATE_MANTISSA)
		return 0;

	if (div_exp_p)
		*div_exp_p = div_exp;
	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	/* Calculate real rate value */
	return SHAPER_RATE(exponent, mantissa, div_exp);
}

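/*
 * Worked example for the low-rate branch above, assuming
 * NIX_SHAPER_RATE_CONST is the 2E6 from the comment: value = 2E6 keeps
 * div_exp = 0 and walks mantissa down to 0, since 2E6 >= (2E6 * 256) / 256,
 * so SHAPER_RATE(0, 0, 0) = 2E6 is returned exactly.
 */
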
static inline uint64_t
shaper_burst_to_nix(uint64_t value, uint64_t *exponent_p,
		    uint64_t *mantissa_p)
{
	uint64_t exponent, mantissa;

	if (value < MIN_SHAPER_BURST || value > MAX_SHAPER_BURST)
		return 0;

	/* Calculate burst exponent and mantissa using
	 * the following formula:
	 *
	 * value = ((256 + mantissa) << (exponent + 1)) / 256
	 */
	exponent = MAX_BURST_EXPONENT;
	mantissa = MAX_BURST_MANTISSA;

	while (value < (1ull << (exponent + 1)))
		exponent -= 1;

	while (value < ((256 + mantissa) << (exponent + 1)) / 256)
		mantissa -= 1;

	if (exponent > MAX_BURST_EXPONENT || mantissa > MAX_BURST_MANTISSA)
		return 0;

	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	return SHAPER_BURST(exponent, mantissa);
}

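/*
 * Worked example (assuming MIN_SHAPER_BURST <= 512 <= MAX_SHAPER_BURST):
 * value = 512 stops the first loop at exponent = 8 (1 << 9 == 512) and
 * walks mantissa down to 0, since 512 >= (256 << 9) / 256, giving
 * SHAPER_BURST(8, 0) = 512 bytes.
 */
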
static void
shaper_config_to_nix(struct otx2_nix_tm_shaper_profile *profile,
		     struct shaper_params *cir,
		     struct shaper_params *pir)
{
	struct rte_tm_shaper_params *param;

	if (!profile)
		return;
	param = &profile->params;

	/* Calculate CIR exponent and mantissa */
	if (param->committed.rate)
		cir->rate = shaper_rate_to_nix(param->committed.rate,
					       &cir->exponent,
					       &cir->mantissa,
					       &cir->div_exp);

	/* Calculate PIR exponent and mantissa */
	if (param->peak.rate)
		pir->rate = shaper_rate_to_nix(param->peak.rate,
					       &pir->exponent,
					       &pir->mantissa,
					       &pir->div_exp);

	/* Calculate CIR burst exponent and mantissa */
	if (param->committed.size)
		cir->burst = shaper_burst_to_nix(param->committed.size,
						 &cir->burst_exponent,
						 &cir->burst_mantissa);

	/* Calculate PIR burst exponent and mantissa */
	if (param->peak.size)
		pir->burst = shaper_burst_to_nix(param->peak.size,
						 &pir->burst_exponent,
						 &pir->burst_mantissa);
}

static void
shaper_default_red_algo(struct otx2_eth_dev *dev,
			struct otx2_nix_tm_node *tm_node,
			struct otx2_nix_tm_shaper_profile *profile)
{
	struct shaper_params cir, pir;

	/* C0 doesn't support STALL when both PIR & CIR are enabled */
	if (profile && otx2_dev_is_96xx_Cx(dev)) {
		memset(&cir, 0, sizeof(cir));
		memset(&pir, 0, sizeof(pir));
		shaper_config_to_nix(profile, &cir, &pir);

		if (pir.rate && cir.rate) {
			tm_node->red_algo = NIX_REDALG_DISCARD;
			tm_node->flags |= NIX_TM_NODE_RED_DISCARD;
			return;
		}
	}

	tm_node->red_algo = NIX_REDALG_STD;
	tm_node->flags &= ~NIX_TM_NODE_RED_DISCARD;
}

static int
populate_tm_tl1_default(struct otx2_eth_dev *dev, uint32_t schq)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_txschq_config *req;

	/*
	 * Default config for TL1.
	 * For VF this is always ignored.
	 */

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_TL1;

	/* Set DWRR quantum */
	req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
	req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
	req->num_regs++;

	req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
	req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
	req->num_regs++;

	req->reg[2] = NIX_AF_TL1X_CIR(schq);
	req->regval[2] = 0;
	req->num_regs++;

	return otx2_mbox_process(mbox);
}

static uint8_t
prepare_tm_sched_reg(struct otx2_eth_dev *dev,
		     struct otx2_nix_tm_node *tm_node,
		     volatile uint64_t *reg, volatile uint64_t *regval)
{
	uint64_t strict_prio = tm_node->priority;
	uint32_t hw_lvl = tm_node->hw_lvl;
	uint32_t schq = tm_node->hw_id;
	uint64_t rr_quantum;
	uint8_t k = 0;

	rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);

	/* For children to root, strict prio is default if either
	 * device root is TL2 or TL1 Static Priority is disabled.
	 */
	if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
	    (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
	     dev->tm_flags & NIX_TM_TL1_NO_SP))
		strict_prio = TXSCH_TL1_DFLT_RR_PRIO;

	otx2_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
		    "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
		    nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
		    tm_node->id, strict_prio, rr_quantum, tm_node);

	switch (hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;
		break;
	case NIX_TXSCH_LVL_TL3:
		reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;
		break;
	case NIX_TXSCH_LVL_TL2:
		reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
		regval[k] = (strict_prio << 24) | rr_quantum;
		k++;
		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
		regval[k] = rr_quantum;
		k++;
		break;
	}

	return k;
}

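/*
 * Note on the SCHEDULE registers programmed above: each packs the strict
 * priority at bit 24 and the DWRR quantum in the low bits; TL1 is the
 * exception with no SP field, so only the quantum is set there.
 */
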
static uint8_t
prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
		      struct otx2_nix_tm_shaper_profile *profile,
		      volatile uint64_t *reg, volatile uint64_t *regval)
{
	struct shaper_params cir, pir;
	uint32_t schq = tm_node->hw_id;
	uint8_t k = 0;

	memset(&cir, 0, sizeof(cir));
	memset(&pir, 0, sizeof(pir));
	shaper_config_to_nix(profile, &cir, &pir);

	otx2_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
		    "pir %" PRIu64 "(%" PRIu64 "B), "
		    "cir %" PRIu64 "(%" PRIu64 "B) (%p)",
		    nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
		    tm_node->id, pir.rate, pir.burst,
		    cir.rate, cir.burst, tm_node);

	switch (tm_node->hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_MDQX_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				(shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_MDQX_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				(shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED ALG */
		reg[k] = NIX_AF_MDQX_SHAPE(schq);
		regval[k] = ((uint64_t)tm_node->red_algo << 9);
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL4X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				(shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_TL4X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				(shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL4X_SHAPE(schq);
		regval[k] = ((uint64_t)tm_node->red_algo << 9);
		k++;
		break;
	case NIX_TXSCH_LVL_TL3:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL3X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				(shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_TL3X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				(shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL3X_SHAPE(schq);
		regval[k] = ((uint64_t)tm_node->red_algo << 9);
		k++;
		break;
	case NIX_TXSCH_LVL_TL2:
		/* Configure PIR, CIR */
		reg[k] = NIX_AF_TL2X_PIR(schq);
		regval[k] = (pir.rate && pir.burst) ?
				(shaper2regval(&pir) | 1) : 0;
		k++;

		reg[k] = NIX_AF_TL2X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				(shaper2regval(&cir) | 1) : 0;
		k++;

		/* Configure RED algo */
		reg[k] = NIX_AF_TL2X_SHAPE(schq);
		regval[k] = ((uint64_t)tm_node->red_algo << 9);
		k++;
		break;
	case NIX_TXSCH_LVL_TL1:
		/* Configure CIR */
		reg[k] = NIX_AF_TL1X_CIR(schq);
		regval[k] = (cir.rate && cir.burst) ?
				(shaper2regval(&cir) | 1) : 0;
		k++;
		break;
	}

	return k;
}

static uint8_t
prepare_tm_sw_xoff(struct otx2_nix_tm_node *tm_node, bool enable,
		   volatile uint64_t *reg, volatile uint64_t *regval)
{
	uint32_t hw_lvl = tm_node->hw_lvl;
	uint32_t schq = tm_node->hw_id;
	uint8_t k = 0;

	otx2_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)",
		    nix_hwlvl2str(hw_lvl), schq, tm_node->lvl,
		    tm_node->id, enable, tm_node);

	regval[k] = enable;

	switch (hw_lvl) {
	case NIX_TXSCH_LVL_MDQ:
		reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL3:
		reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL2:
		reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
		k++;
		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
		k++;
		break;
	default:
		break;
	}

	return k;
}

static int
populate_tm_reg(struct otx2_eth_dev *dev,
		struct otx2_nix_tm_node *tm_node)
{
	struct otx2_nix_tm_shaper_profile *profile;
	uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
	uint64_t regval[MAX_REGS_PER_MBOX_MSG];
	uint64_t reg[MAX_REGS_PER_MBOX_MSG];
	struct otx2_mbox *mbox = dev->mbox;
	uint64_t parent = 0, child = 0;
	uint32_t hw_lvl, rr_prio, schq;
	struct nix_txschq_config *req;
	int rc = -EFAULT;
	uint8_t k = 0;

	memset(regval_mask, 0, sizeof(regval_mask));
	profile = nix_tm_shaper_profile_search(dev,
					tm_node->params.shaper_profile_id);
	rr_prio = tm_node->rr_prio;
	hw_lvl = tm_node->hw_lvl;
	schq = tm_node->hw_id;

	/* Root node will not have a parent node */
	if (hw_lvl == dev->otx2_tm_root_lvl)
		parent = tm_node->parent_hw_id;
	else
		parent = tm_node->parent->hw_id;

	/* When the root is TL2, its TL1 parent still needs default config */
	if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
	    hw_lvl == dev->otx2_tm_root_lvl) {
		rc = populate_tm_tl1_default(dev, parent);
		if (rc)
			goto error;
	}

	if (hw_lvl != NIX_TXSCH_LVL_SMQ)
		child = find_prio_anchor(dev, tm_node->id);

	/* Override default rr_prio when TL1
	 * Static Priority is disabled
	 */
	if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
	    dev->tm_flags & NIX_TM_TL1_NO_SP) {
		rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
		child = 0;
	}

	otx2_tm_dbg("Topology config node %s(%u)->%s(%"PRIu64") lvl %u, id %u"
		    " prio_anchor %"PRIu64" rr_prio %u (%p)",
		    nix_hwlvl2str(hw_lvl), schq, nix_hwlvl2str(hw_lvl + 1),
		    parent, tm_node->lvl, tm_node->id, child, rr_prio, tm_node);

	/* Prepare Topology and Link config */
	switch (hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		/* Set xoff, which will be cleared later, and the minimum
		 * length used for zero padding when the packet is smaller
		 */
		reg[k] = NIX_AF_SMQX_CFG(schq);
		regval[k] = BIT_ULL(50) | NIX_MIN_HW_FRS;
		regval_mask[k] = ~(BIT_ULL(50) | 0x7f);
		k++;
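		/* The AF is expected to apply regval_mask as a
		 * read-modify-write, i.e. (old & regval_mask) | regval
		 * (an assumption consistent with the mask above clearing
		 * only BIT(50) and the 7-bit minimum-length field), so the
		 * rest of NIX_AF_SMQX_CFG, such as the MTU, is preserved.
		 */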

		/* Parent and schedule conf */
		reg[k] = NIX_AF_MDQX_PARENT(schq);
		regval[k] = parent << 16;
		k++;
		break;
	case NIX_TXSCH_LVL_TL4:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL4X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Configure TL4 to send to SDP channel instead of CGX/LBK */
		if (otx2_dev_is_sdp(dev)) {
			reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
			regval[k] = BIT_ULL(12);
			k++;
		}
		break;
	case NIX_TXSCH_LVL_TL3:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL3X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Link configuration */
		if (!otx2_dev_is_sdp(dev) &&
		    dev->link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
						otx2_nix_get_link(dev));
			regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
			k++;
		}
		break;
	case NIX_TXSCH_LVL_TL2:
		/* Parent and schedule conf */
		reg[k] = NIX_AF_TL2X_PARENT(schq);
		regval[k] = parent << 16;
		k++;

		reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1);
		k++;

		/* Link configuration */
		if (!otx2_dev_is_sdp(dev) &&
		    dev->link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
						otx2_nix_get_link(dev));
			regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
			k++;
		}
		break;
	case NIX_TXSCH_LVL_TL1:
		reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
		regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
		k++;
		break;
	}

	/* Prepare schedule config */
	k += prepare_tm_sched_reg(dev, tm_node, &reg[k], &regval[k]);

	/* Prepare shaping config */
	k += prepare_tm_shaper_reg(tm_node, profile, &reg[k], &regval[k]);

	if (!k)
		return 0;

	/* Copy and send config mbox */
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = hw_lvl;
	req->num_regs = k;

	otx2_mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
	otx2_mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
	otx2_mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);

	rc = otx2_mbox_process(mbox);
	if (rc)
		goto error;

	return 0;
error:
	otx2_err("Txschq cfg request failed for node %p, rc=%d", tm_node, rc);
	return rc;
}

static int
nix_tm_txsch_reg_config(struct otx2_eth_dev *dev)
{
	struct otx2_nix_tm_node *tm_node;
	uint32_t hw_lvl;
	int rc = 0;

	for (hw_lvl = 0; hw_lvl <= dev->otx2_tm_root_lvl; hw_lvl++) {
		TAILQ_FOREACH(tm_node, &dev->node_list, node) {
			if (tm_node->hw_lvl == hw_lvl &&
			    tm_node->hw_lvl != NIX_TXSCH_LVL_CNT) {
				rc = populate_tm_reg(dev, tm_node);
				if (rc)
					goto exit;
			}
		}
	}
exit:
	return rc;
}

static struct otx2_nix_tm_node *
nix_tm_node_search(struct otx2_eth_dev *dev,
		   uint32_t node_id, bool user)
{
	struct otx2_nix_tm_node *tm_node;

	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
		if (tm_node->id == node_id &&
		    (user == !!(tm_node->flags & NIX_TM_NODE_USER)))
			return tm_node;
	}
	return NULL;
}

static uint32_t
check_rr(struct otx2_eth_dev *dev, uint32_t priority, uint32_t parent_id)
{
	struct otx2_nix_tm_node *tm_node;
	uint32_t rr_num = 0;

	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
		if (!tm_node->parent)
			continue;

		if (tm_node->parent->id != parent_id)
			continue;

		if (tm_node->priority == priority)
			rr_num++;
	}
	return rr_num;
}

static int
nix_tm_update_parent_info(struct otx2_eth_dev *dev)
{
	struct otx2_nix_tm_node *tm_node_child;
	struct otx2_nix_tm_node *tm_node;
	struct otx2_nix_tm_node *parent;
	uint32_t rr_num = 0;
	uint32_t priority;

	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
		if (!tm_node->parent)
			continue;
		/* Count the group of children with the same priority, i.e. RR */
		parent = tm_node->parent;
		priority = tm_node->priority;
		rr_num = check_rr(dev, priority, parent->id);

		/* Assuming that multiple RR groups are
		 * not configured based on capability.
		 */
		if (rr_num > 1) {
			parent->rr_prio = priority;
			parent->rr_num = rr_num;
		}

		/* Find out static priority children that are not in RR */
		TAILQ_FOREACH(tm_node_child, &dev->node_list, node) {
			if (!tm_node_child->parent)
				continue;
			if (parent->id != tm_node_child->parent->id)
				continue;
			if (parent->max_prio == UINT32_MAX &&
			    tm_node_child->priority != parent->rr_prio)
				parent->max_prio = 0;

			if (parent->max_prio < tm_node_child->priority &&
			    parent->rr_prio != tm_node_child->priority)
				parent->max_prio = tm_node_child->priority;
		}
	}

	return 0;
}

static int
nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,
			uint32_t parent_node_id, uint32_t priority,
			uint32_t weight, uint16_t hw_lvl,
			uint16_t lvl, bool user,
			struct rte_tm_node_params *params)
{
	struct otx2_nix_tm_shaper_profile *profile;
	struct otx2_nix_tm_node *tm_node, *parent_node;
	uint32_t profile_id;

	profile_id = params->shaper_profile_id;
	profile = nix_tm_shaper_profile_search(dev, profile_id);

	parent_node = nix_tm_node_search(dev, parent_node_id, user);

	tm_node = rte_zmalloc("otx2_nix_tm_node",
			      sizeof(struct otx2_nix_tm_node), 0);
	if (!tm_node)
		return -ENOMEM;

	tm_node->lvl = lvl;
	tm_node->hw_lvl = hw_lvl;

	/* Maintain minimum weight */
	if (!weight)
		weight = 1;

	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->rr_prio = 0xf;
	tm_node->max_prio = UINT32_MAX;
	tm_node->hw_id = UINT32_MAX;
	tm_node->flags = 0;
	if (user)
		tm_node->flags = NIX_TM_NODE_USER;
	rte_memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));

	if (profile)
		profile->reference_count++;

	tm_node->parent = parent_node;
	tm_node->parent_hw_id = UINT32_MAX;
	shaper_default_red_algo(dev, tm_node, profile);

	TAILQ_INSERT_TAIL(&dev->node_list, tm_node, node);

	return 0;
}

static int
nix_tm_clear_shaper_profiles(struct otx2_eth_dev *dev)
{
	struct otx2_nix_tm_shaper_profile *shaper_profile;

	while ((shaper_profile = TAILQ_FIRST(&dev->shaper_profile_list))) {
		if (shaper_profile->reference_count)
			otx2_tm_dbg("Shaper profile %u has non zero references",
				    shaper_profile->shaper_profile_id);
		TAILQ_REMOVE(&dev->shaper_profile_list, shaper_profile, shaper);
		rte_free(shaper_profile);
	}

	return 0;
}

static int
nix_clear_path_xoff(struct otx2_eth_dev *dev,
		    struct otx2_nix_tm_node *tm_node)
{
	struct nix_txschq_config *req;
	struct otx2_nix_tm_node *p;
	int rc;

	/* Manipulating SW_XOFF not supported on Ax */
	if (otx2_dev_is_Ax(dev))
		return 0;

	/* Enable nodes in path for flush to succeed */
	if (!nix_tm_is_leaf(dev, tm_node->lvl))
		p = tm_node;
	else
		p = tm_node->parent;
	while (p) {
		if (!(p->flags & NIX_TM_NODE_ENABLED) &&
		    (p->flags & NIX_TM_NODE_HWRES)) {
			req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
			req->lvl = p->hw_lvl;
			req->num_regs = prepare_tm_sw_xoff(p, false, req->reg,
							   req->regval);
			rc = otx2_mbox_process(dev->mbox);
			if (rc)
				return rc;

			p->flags |= NIX_TM_NODE_ENABLED;
		}
		p = p->parent;
	}

	return 0;
}

static int
nix_smq_xoff(struct otx2_eth_dev *dev,
	     struct otx2_nix_tm_node *tm_node,
	     bool enable)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_txschq_config *req;
	uint16_t smq;
	int rc;

	smq = tm_node->hw_id;
	otx2_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
		    enable ? "enable" : "disable");

	rc = nix_clear_path_xoff(dev, tm_node);
	if (rc)
		return rc;

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_SMQ;
	req->num_regs = 1;

	req->reg[0] = NIX_AF_SMQX_CFG(smq);
	req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
	req->regval_mask[0] = enable ?
				~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);

	return otx2_mbox_process(mbox);
}

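/*
 * Interpretation of the bits used above, inferred from usage rather than
 * quoted from the HRM: BIT(49) appears to trigger the SMQ flush and
 * BIT(50) the stop/xoff; populate_tm_reg() sets BIT(50) at init and the
 * disable path here clears only BIT(50) to let traffic resume.
 */
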
int
otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable)
{
	struct otx2_eth_txq *txq = __txq;
	struct npa_aq_enq_req *req;
	struct npa_aq_enq_rsp *rsp;
	struct otx2_npa_lf *lf;
	struct otx2_mbox *mbox;
	uint64_t aura_handle;
	int rc;

	otx2_tm_dbg("Setting SQ %u SQB aura FC to %s", txq->sq,
		    enable ? "enable" : "disable");

	lf = otx2_npa_lf_obj_get();
	if (!lf)
		return -EFAULT;
	mbox = lf->mbox;
	/* Set/clear sqb aura fc_ena */
	aura_handle = txq->sqb_pool->pool_id;
	req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);

	req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_WRITE;
	/* Below is not needed for aura writes but AF driver needs it */
	/* AF will translate to associated poolctx */
	req->aura.pool_addr = req->aura_id;

	req->aura.fc_ena = enable;
	req->aura_mask.fc_ena = 1;

	rc = otx2_mbox_process(mbox);
	if (rc)
		return rc;

	/* Read back npa aura ctx */
	req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);

	req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_READ;

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Init when enabled as there might be no triggers */
	if (enable)
		*(volatile uint64_t *)txq->fc_mem = rsp->aura.count;
	else
		*(volatile uint64_t *)txq->fc_mem = txq->nb_sqb_bufs;
	/* Sync write barrier */
	rte_wmb();

	return 0;
}

static int
nix_txq_flush_sq_spin(struct otx2_eth_txq *txq)
{
	uint16_t sqb_cnt, head_off, tail_off;
	struct otx2_eth_dev *dev = txq->dev;
	uint64_t wdata, val, prev;
	uint16_t sq = txq->sq;
	int64_t *regaddr;
	uint64_t timeout; /* 10's of usec */

	/* Wait for enough time based on shaper min rate */
	timeout = (txq->qconf.nb_desc * NIX_MAX_HW_FRS * 8 * 1E5);
	timeout = timeout / dev->tm_rate_min;
	if (!timeout)
		timeout = 10000;

	wdata = ((uint64_t)sq << 32);
	regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS);
	val = otx2_atomic64_add_nosync(wdata, regaddr);

	/* Spin multiple iterations as "txq->fc_cache_pkts" can still
	 * have space to send pkts even though fc_mem is disabled
	 */
	while (true) {
		prev = val;
		rte_delay_us(10);
		val = otx2_atomic64_add_nosync(wdata, regaddr);
		/* Continue on error */
		if (val & BIT_ULL(63))
			continue;

		if (prev != val)
			continue;

		sqb_cnt = val & 0xFFFF;
		head_off = (val >> 20) & 0x3F;
		tail_off = (val >> 28) & 0x3F;

		/* SQ reached quiescent state */
		if (sqb_cnt <= 1 && head_off == tail_off &&
		    (*txq->fc_mem == txq->nb_sqb_bufs)) {
			break;
		}

		/* Timeout */
		if (!timeout)
			goto exit;
		timeout--;
	}

	return 0;
exit:
	otx2_nix_tm_dump(dev);
	return -EFAULT;
}

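/*
 * Timeout arithmetic in nix_txq_flush_sq_spin(), in 10 us ticks:
 * nb_desc * NIX_MAX_HW_FRS * 8 is the worst-case number of queued bits,
 * dividing by tm_rate_min (bits/sec) gives seconds, and the 1E5 factor
 * converts seconds to 10 us ticks. E.g. assuming NIX_MAX_HW_FRS = 9212
 * and a 1 Mbps shaper floor, 1024 descriptors give ~7.5M ticks, i.e.
 * roughly 75 seconds to drain.
 */
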
/* Flush and disable tx queue and its parent SMQ */
int otx2_nix_sq_flush_pre(void *_txq, bool dev_started)
{
	struct otx2_nix_tm_node *tm_node, *sibling;
	struct otx2_eth_txq *txq;
	struct otx2_eth_dev *dev;
	uint16_t sq;
	bool user;
	int rc;

	txq = _txq;
	dev = txq->dev;
	sq = txq->sq;

	user = !!(dev->tm_flags & NIX_TM_COMMITTED);

	/* Find the node for this SQ */
	tm_node = nix_tm_node_search(dev, sq, user);
	if (!tm_node || !(tm_node->flags & NIX_TM_NODE_ENABLED)) {
		otx2_err("Invalid node/state for sq %u", sq);
		return -EFAULT;
	}

	/* Enable CGX RXTX to drain pkts */
	if (!dev_started) {
		/* Though it enables both RX MCAM entries and the CGX link,
		 * we assume all the rx queues were already stopped.
		 */
		otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
		rc = otx2_mbox_process(dev->mbox);
		if (rc) {
			otx2_err("cgx start failed, rc=%d", rc);
			return rc;
		}
	}

	/* Disable smq xoff in case it was enabled earlier */
	rc = nix_smq_xoff(dev, tm_node->parent, false);
	if (rc) {
		otx2_err("Failed to enable smq %u, rc=%d",
			 tm_node->parent->hw_id, rc);
		return rc;
	}

	/* As per HRM, to disable an SQ, all other SQs
	 * that feed the same SMQ must be paused before the SMQ flush.
	 */
	TAILQ_FOREACH(sibling, &dev->node_list, node) {
		if (sibling->parent != tm_node->parent)
			continue;
		if (!(sibling->flags & NIX_TM_NODE_ENABLED))
			continue;

		sq = sibling->id;
		txq = dev->eth_dev->data->tx_queues[sq];
		if (!txq)
			continue;

		rc = otx2_nix_sq_sqb_aura_fc(txq, false);
		if (rc) {
			otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
			goto cleanup;
		}

		/* Wait for sq entries to be flushed */
		rc = nix_txq_flush_sq_spin(txq);
		if (rc) {
			otx2_err("Failed to drain sq %u, rc=%d\n", txq->sq, rc);
			return rc;
		}
	}

	tm_node->flags &= ~NIX_TM_NODE_ENABLED;

	/* Disable and flush */
	rc = nix_smq_xoff(dev, tm_node->parent, true);
	if (rc) {
		otx2_err("Failed to disable smq %u, rc=%d",
			 tm_node->parent->hw_id, rc);
		goto cleanup;
	}
cleanup:
	/* Restore cgx state */
	if (!dev_started) {
		otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
		rc |= otx2_mbox_process(dev->mbox);
	}

	return rc;
}

int otx2_nix_sq_flush_post(void *_txq)
{
	struct otx2_nix_tm_node *tm_node, *sibling;
	struct otx2_eth_txq *txq = _txq;
	struct otx2_eth_txq *s_txq;
	struct otx2_eth_dev *dev;
	bool once = false;
	uint16_t sq, s_sq;
	bool user;
	int rc;

	dev = txq->dev;
	sq = txq->sq;
	user = !!(dev->tm_flags & NIX_TM_COMMITTED);

	/* Find the node for this SQ */
	tm_node = nix_tm_node_search(dev, sq, user);
	if (!tm_node) {
		otx2_err("Invalid node for sq %u", sq);
		return -EFAULT;
	}

	/* Enable all the siblings back */
	TAILQ_FOREACH(sibling, &dev->node_list, node) {
		if (sibling->parent != tm_node->parent)
			continue;

		if (sibling->id == sq)
			continue;

		if (!(sibling->flags & NIX_TM_NODE_ENABLED))
			continue;

		s_sq = sibling->id;
		s_txq = dev->eth_dev->data->tx_queues[s_sq];
		if (!s_txq)
			continue;

		if (!once) {
			/* Enable back if any SQ is still present */
			rc = nix_smq_xoff(dev, tm_node->parent, false);
			if (rc) {
				otx2_err("Failed to enable smq %u, rc=%d",
					 tm_node->parent->hw_id, rc);
				return rc;
			}
			once = true;
		}

		rc = otx2_nix_sq_sqb_aura_fc(s_txq, true);
		if (rc) {
			otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
			return rc;
		}
	}

	return 0;
}

static int
nix_sq_sched_data(struct otx2_eth_dev *dev,
		  struct otx2_nix_tm_node *tm_node,
		  bool rr_quantum_only)
{
	struct rte_eth_dev *eth_dev = dev->eth_dev;
	struct otx2_mbox *mbox = dev->mbox;
	uint16_t sq = tm_node->id, smq;
	struct nix_aq_enq_req *req;
	uint64_t rr_quantum;
	int rc;

	smq = tm_node->parent->hw_id;
	rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);

	if (rr_quantum_only)
		otx2_tm_dbg("Update sq(%u) rr_quantum 0x%"PRIx64,
			    sq, rr_quantum);
	else
		otx2_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%"PRIx64,
			    sq, smq, rr_quantum);

	if (sq > eth_dev->data->nb_tx_queues)
		return -EFAULT;

	req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	req->qidx = sq;
	req->ctype = NIX_AQ_CTYPE_SQ;
	req->op = NIX_AQ_INSTOP_WRITE;

	/* smq update only when needed */
	if (!rr_quantum_only) {
		req->sq.smq = smq;
		req->sq_mask.smq = ~req->sq_mask.smq;
	}
	req->sq.smq_rr_quantum = rr_quantum;
	req->sq_mask.smq_rr_quantum = ~req->sq_mask.smq_rr_quantum;

	rc = otx2_mbox_process(mbox);
	if (rc)
		otx2_err("Failed to set smq, rc=%d", rc);
	return rc;
}

int otx2_nix_sq_enable(void *_txq)
{
	struct otx2_eth_txq *txq = _txq;
	int rc;

	/* Enable sqb_aura fc */
	rc = otx2_nix_sq_sqb_aura_fc(txq, true);
	if (rc) {
		otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
		return rc;
	}

	return 0;
}

static int
nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask,
		      uint32_t flags, bool hw_only)
{
	struct otx2_nix_tm_shaper_profile *profile;
	struct otx2_nix_tm_node *tm_node, *next_node;
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_txsch_free_req *req;
	uint32_t profile_id;
	int rc = 0;

	next_node = TAILQ_FIRST(&dev->node_list);
	while (next_node) {
		tm_node = next_node;
		next_node = TAILQ_NEXT(tm_node, node);

		/* Check for only requested nodes */
		if ((tm_node->flags & flags_mask) != flags)
			continue;

		if (!nix_tm_is_leaf(dev, tm_node->lvl) &&
		    tm_node->hw_lvl != NIX_TXSCH_LVL_TL1 &&
		    tm_node->flags & NIX_TM_NODE_HWRES) {
			/* Free specific HW resource */
			otx2_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
				    nix_hwlvl2str(tm_node->hw_lvl),
				    tm_node->hw_id, tm_node->lvl,
				    tm_node->id, tm_node);

			rc = nix_clear_path_xoff(dev, tm_node);
			if (rc)
				return rc;

			req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
			req->flags = 0;
			req->schq_lvl = tm_node->hw_lvl;
			req->schq = tm_node->hw_id;
			rc = otx2_mbox_process(mbox);
			if (rc)
				return rc;
			tm_node->flags &= ~NIX_TM_NODE_HWRES;
		}

		/* Leave software elements if needed */
		if (hw_only)
			continue;

		otx2_tm_dbg("Free node lvl %u id %u (%p)",
			    tm_node->lvl, tm_node->id, tm_node);

		profile_id = tm_node->params.shaper_profile_id;
		profile = nix_tm_shaper_profile_search(dev, profile_id);
		if (profile)
			profile->reference_count--;

		TAILQ_REMOVE(&dev->node_list, tm_node, node);
		rte_free(tm_node);
	}

	if (!flags_mask) {
		/* Free all hw resources */
		req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
		req->flags = TXSCHQ_FREE_ALL;

		return otx2_mbox_process(mbox);
	}

	return rc;
}

static uint8_t
nix_tm_copy_rsp_to_dev(struct otx2_eth_dev *dev,
		       struct nix_txsch_alloc_rsp *rsp)
{
	uint16_t schq;
	uint8_t lvl;

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) {
			dev->txschq_list[lvl][schq] = rsp->schq_list[lvl][schq];
			dev->txschq_contig_list[lvl][schq] =
				rsp->schq_contig_list[lvl][schq];
		}

		dev->txschq[lvl] = rsp->schq[lvl];
		dev->txschq_contig[lvl] = rsp->schq_contig[lvl];
	}
	return 0;
}

static int
nix_tm_assign_id_to_node(struct otx2_eth_dev *dev,
			 struct otx2_nix_tm_node *child,
			 struct otx2_nix_tm_node *parent)
{
	uint32_t hw_id, schq_con_index, prio_offset;
	uint32_t l_id, schq_index;

	otx2_tm_dbg("Assign hw id for child node %s lvl %u id %u (%p)",
		    nix_hwlvl2str(child->hw_lvl), child->lvl, child->id, child);

	child->flags |= NIX_TM_NODE_HWRES;

	/* Process root nodes */
	if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
	    child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
		int idx = 0;
		uint32_t tschq_con_index;

		l_id = child->hw_lvl;
		tschq_con_index = dev->txschq_contig_index[l_id];
		hw_id = dev->txschq_contig_list[l_id][tschq_con_index];
		child->hw_id = hw_id;
		dev->txschq_contig_index[l_id]++;
		/* Update TL1 hw_id for its parent for config purpose */
		idx = dev->txschq_index[NIX_TXSCH_LVL_TL1]++;
		hw_id = dev->txschq_list[NIX_TXSCH_LVL_TL1][idx];
		child->parent_hw_id = hw_id;
		return 0;
	}
	if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL1 &&
	    child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
		uint32_t tschq_con_index;

		l_id = child->hw_lvl;
		tschq_con_index = dev->txschq_index[l_id];
		hw_id = dev->txschq_list[l_id][tschq_con_index];
		child->hw_id = hw_id;
		dev->txschq_index[l_id]++;
		return 0;
	}

	/* Process children with parents */
	l_id = child->hw_lvl;
	schq_index = dev->txschq_index[l_id];
	schq_con_index = dev->txschq_contig_index[l_id];

	if (child->priority == parent->rr_prio) {
		hw_id = dev->txschq_list[l_id][schq_index];
		child->hw_id = hw_id;
		child->parent_hw_id = parent->hw_id;
		dev->txschq_index[l_id]++;
	} else {
		prio_offset = schq_con_index + child->priority;
		hw_id = dev->txschq_contig_list[l_id][prio_offset];
		child->hw_id = hw_id;
	}
	return 0;
}

static int
nix_tm_assign_hw_id(struct otx2_eth_dev *dev)
{
	struct otx2_nix_tm_node *parent, *child;
	uint32_t child_hw_lvl, con_index_inc, i;

	for (i = NIX_TXSCH_LVL_TL1; i > 0; i--) {
		TAILQ_FOREACH(parent, &dev->node_list, node) {
			child_hw_lvl = parent->hw_lvl - 1;
			if (parent->hw_lvl != i)
				continue;
			TAILQ_FOREACH(child, &dev->node_list, node) {
				if (!child->parent)
					continue;
				if (child->parent->id != parent->id)
					continue;
				nix_tm_assign_id_to_node(dev, child, parent);
			}

			con_index_inc = parent->max_prio + 1;
			dev->txschq_contig_index[child_hw_lvl] += con_index_inc;

			/*
			 * Explicitly assign id to parent node if it
			 * doesn't have a parent
			 */
			if (parent->hw_lvl == dev->otx2_tm_root_lvl)
				nix_tm_assign_id_to_node(dev, parent, NULL);
		}
	}
	return 0;
}

static uint8_t
nix_tm_count_req_schq(struct otx2_eth_dev *dev,
		      struct nix_txsch_alloc_req *req, uint8_t lvl)
{
	struct otx2_nix_tm_node *tm_node;
	uint8_t contig_count;

	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
		if (lvl == tm_node->hw_lvl) {
			req->schq[lvl - 1] += tm_node->rr_num;
			if (tm_node->max_prio != UINT32_MAX) {
				contig_count = tm_node->max_prio + 1;
				req->schq_contig[lvl - 1] += contig_count;
			}
		}
		if (lvl == dev->otx2_tm_root_lvl &&
		    dev->otx2_tm_root_lvl && lvl == NIX_TXSCH_LVL_TL2 &&
		    tm_node->hw_lvl == dev->otx2_tm_root_lvl) {
			req->schq_contig[dev->otx2_tm_root_lvl]++;
		}
	}

	req->schq[NIX_TXSCH_LVL_TL1] = 1;
	req->schq_contig[NIX_TXSCH_LVL_TL1] = 0;

	return 0;
}

static int
nix_tm_prepare_txschq_req(struct otx2_eth_dev *dev,
			  struct nix_txsch_alloc_req *req)
{
	uint8_t i;

	for (i = NIX_TXSCH_LVL_TL1; i > 0; i--)
		nix_tm_count_req_schq(dev, req, i);

	for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
		dev->txschq_index[i] = 0;
		dev->txschq_contig_index[i] = 0;
	}
	return 0;
}

static int
nix_tm_send_txsch_alloc_msg(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	int rc;

	req = otx2_mbox_alloc_msg_nix_txsch_alloc(mbox);

	rc = nix_tm_prepare_txschq_req(dev, req);
	if (rc)
		return rc;

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	nix_tm_copy_rsp_to_dev(dev, rsp);
	dev->link_cfg_lvl = rsp->link_cfg_lvl;

	nix_tm_assign_hw_id(dev);
	return 0;
}

1473 static int
1474 nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)
1475 {
1476         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1477         struct otx2_nix_tm_node *tm_node;
1478         struct otx2_eth_txq *txq;
1479         uint16_t sq;
1480         int rc;
1481
1482         nix_tm_update_parent_info(dev);
1483
1484         rc = nix_tm_send_txsch_alloc_msg(dev);
1485         if (rc) {
1486                 otx2_err("TM failed to alloc tm resources=%d", rc);
1487                 return rc;
1488         }
1489
1490         rc = nix_tm_txsch_reg_config(dev);
1491         if (rc) {
1492                 otx2_err("TM failed to configure sched registers=%d", rc);
1493                 return rc;
1494         }
1495
1496         /* Trigger MTU recalculate as SMQ needs MTU conf */
1497         if (eth_dev->data->dev_started && eth_dev->data->nb_rx_queues) {
1498                 rc = otx2_nix_recalc_mtu(eth_dev);
1499                 if (rc) {
1500                         otx2_err("TM MTU update failed, rc=%d", rc);
1501                         return rc;
1502                 }
1503         }
1504
1505         /* Mark all non-leaf's as enabled */
1506         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1507                 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1508                         tm_node->flags |= NIX_TM_NODE_ENABLED;
1509         }
1510
1511         if (!xmit_enable)
1512                 return 0;
1513
1514         /* Update SQ Sched Data while SQ is idle */
1515         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1516                 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1517                         continue;
1518
1519                 rc = nix_sq_sched_data(dev, tm_node, false);
1520                 if (rc) {
1521                         otx2_err("SQ %u sched update failed, rc=%d",
1522                                  tm_node->id, rc);
1523                         return rc;
1524                 }
1525         }
1526
1527         /* Finally XON all SMQ's */
1528         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1529                 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1530                         continue;
1531
1532                 rc = nix_smq_xoff(dev, tm_node, false);
1533                 if (rc) {
1534                         otx2_err("Failed to enable smq %u, rc=%d",
1535                                  tm_node->hw_id, rc);
1536                         return rc;
1537                 }
1538         }
1539
1540         /* Enable xmit as all the topology is ready */
1541         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1542                 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1543                         continue;
1544
1545                 sq = tm_node->id;
1546                 txq = eth_dev->data->tx_queues[sq];
1547
1548                 rc = otx2_nix_sq_enable(txq);
1549                 if (rc) {
1550                         otx2_err("TM sw xon failed on SQ %u, rc=%d",
1551                                  tm_node->id, rc);
1552                         return rc;
1553                 }
1554                 tm_node->flags |= NIX_TM_NODE_ENABLED;
1555         }
1556
1557         return 0;
1558 }
1559
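/* Validate a prepared txschq config request and send it via AF mailbox */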
1560 static int
1561 send_tm_reqval(struct otx2_mbox *mbox,
1562                struct nix_txschq_config *req,
1563                struct rte_tm_error *error)
1564 {
1565         int rc;
1566
1567         if (!req->num_regs ||
1568             req->num_regs > MAX_REGS_PER_MBOX_MSG) {
1569                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1570                 error->message = "invalid config";
1571                 return -EIO;
1572         }
1573
1574         rc = otx2_mbox_process(mbox);
1575         if (rc) {
1576                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1577                 error->message = "unexpected fatal error";
1578         }
1579         return rc;
1580 }
1581
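/*
 * Map rte_tm levels to NIX HW levels. With TL1 access (PF, non-Ax):
 *   ROOT->TL1, SCH1->TL2, SCH2->TL3, SCH3->TL4, SCH4->SMQ
 * Without TL1 access (VF/LBK/Ax silicon):
 *   ROOT->TL2, SCH1->TL3, SCH2->TL4, SCH3->SMQ
 */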
1582 static uint16_t
1583 nix_tm_lvl2nix(struct otx2_eth_dev *dev, uint32_t lvl)
1584 {
1585         if (nix_tm_have_tl1_access(dev)) {
1586                 switch (lvl) {
1587                 case OTX2_TM_LVL_ROOT:
1588                         return NIX_TXSCH_LVL_TL1;
1589                 case OTX2_TM_LVL_SCH1:
1590                         return NIX_TXSCH_LVL_TL2;
1591                 case OTX2_TM_LVL_SCH2:
1592                         return NIX_TXSCH_LVL_TL3;
1593                 case OTX2_TM_LVL_SCH3:
1594                         return NIX_TXSCH_LVL_TL4;
1595                 case OTX2_TM_LVL_SCH4:
1596                         return NIX_TXSCH_LVL_SMQ;
1597                 default:
1598                         return NIX_TXSCH_LVL_CNT;
1599                 }
1600         } else {
1601                 switch (lvl) {
1602                 case OTX2_TM_LVL_ROOT:
1603                         return NIX_TXSCH_LVL_TL2;
1604                 case OTX2_TM_LVL_SCH1:
1605                         return NIX_TXSCH_LVL_TL3;
1606                 case OTX2_TM_LVL_SCH2:
1607                         return NIX_TXSCH_LVL_TL4;
1608                 case OTX2_TM_LVL_SCH3:
1609                         return NIX_TXSCH_LVL_SMQ;
1610                 default:
1611                         return NIX_TXSCH_LVL_CNT;
1612                 }
1613         }
1614 }
1615
1616 static uint16_t
1617 nix_max_prio(struct otx2_eth_dev *dev, uint16_t hw_lvl)
1618 {
1619         if (hw_lvl >= NIX_TXSCH_LVL_CNT)
1620                 return 0;
1621
1622         /* MDQ doesn't support SP */
1623         if (hw_lvl == NIX_TXSCH_LVL_MDQ)
1624                 return 0;
1625
1626         /* PF's TL1 with VF's enabled doesn't support SP */
1627         if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
1628             (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
1629              (dev->tm_flags & NIX_TM_TL1_NO_SP)))
1630                 return 0;
1631
1632         return TXSCH_TLX_SP_PRIO_MAX - 1;
1633 }
1634
1635
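/*
 * Validate a new child's priority against its siblings: count the users
 * of each priority under the parent, allow at most one priority level
 * with multiple children (the single DWRR group) and reject holes in
 * the priority sequence.
 */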
1636 static int
1637 validate_prio(struct otx2_eth_dev *dev, uint32_t lvl,
1638               uint32_t parent_id, uint32_t priority,
1639               struct rte_tm_error *error)
1640 {
1641         uint8_t priorities[TXSCH_TLX_SP_PRIO_MAX];
1642         struct otx2_nix_tm_node *tm_node;
1643         uint32_t rr_num = 0;
1644         int i;
1645
1646         /* Validate priority against max */
1647         if (priority > nix_max_prio(dev, nix_tm_lvl2nix(dev, lvl - 1))) {
1648                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
1649                 error->message = "unsupported priority value";
1650                 return -EINVAL;
1651         }
1652
1653         if (parent_id == RTE_TM_NODE_ID_NULL)
1654                 return 0;
1655
1656         memset(priorities, 0, TXSCH_TLX_SP_PRIO_MAX);
1657         priorities[priority] = 1;
1658
1659         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1660                 if (!tm_node->parent)
1661                         continue;
1662
1663                 if (!(tm_node->flags & NIX_TM_NODE_USER))
1664                         continue;
1665
1666                 if (tm_node->parent->id != parent_id)
1667                         continue;
1668
1669                 priorities[tm_node->priority]++;
1670         }
1671
1672         for (i = 0; i < TXSCH_TLX_SP_PRIO_MAX; i++)
1673                 if (priorities[i] > 1)
1674                         rr_num++;
1675
1676         /* At most one RR (DWRR) group is allowed per parent */
1677         if (rr_num > 1) {
1678                 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
1679                 error->message = "multiple DWRR node priority";
1680                 return -EINVAL;
1681         }
1682
1683         /* Check for previous priority to avoid holes in priorities */
1684         if (priority && !priorities[priority - 1]) {
1685                 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
1686                 error->message = "priority not in order";
1687                 return -EINVAL;
1688         }
1689
1690         return 0;
1691 }
1692
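/* Read back a single scheduling register of the given HW level via AF mailbox */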
1693 static int
1694 read_tm_reg(struct otx2_mbox *mbox, uint64_t reg,
1695             uint64_t *regval, uint32_t hw_lvl)
1696 {
1697         volatile struct nix_txschq_config *req;
1698         struct nix_txschq_config *rsp;
1699         int rc;
1700
1701         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
1702         req->read = 1;
1703         req->lvl = hw_lvl;
1704         req->reg[0] = reg;
1705         req->num_regs = 1;
1706
1707         rc = otx2_mbox_process_msg(mbox, (void **)&rsp);
1708         if (rc)
1709                 return rc;
1710         *regval = rsp->regval[0];
1711         return 0;
1712 }
1713
1714 /* Search for min rate in topology */
1715 static void
1716 nix_tm_shaper_profile_update_min(struct otx2_eth_dev *dev)
1717 {
1718         struct otx2_nix_tm_shaper_profile *profile;
1719         uint64_t rate_min = 1E9; /* 1 Gbps */
1720
1721         TAILQ_FOREACH(profile, &dev->shaper_profile_list, shaper) {
1722                 if (profile->params.peak.rate &&
1723                     profile->params.peak.rate < rate_min)
1724                         rate_min = profile->params.peak.rate;
1725
1726                 if (profile->params.committed.rate &&
1727                     profile->params.committed.rate < rate_min)
1728                         rate_min = profile->params.committed.rate;
1729         }
1730
1731         dev->tm_rate_min = rate_min;
1732 }
1733
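/*
 * Gracefully drain and stop transmission: XON all SMQs so queued
 * packets can leave, disable SQB aura flow control and spin until each
 * SQ is empty, then XOFF and flush the SMQs (HRM requires all SQs to be
 * empty first) and verify via NIX_LF_SQ_OP_STATUS that every SQ drained
 * cleanly.
 */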
1734 static int
1735 nix_xmit_disable(struct rte_eth_dev *eth_dev)
1736 {
1737         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1738         uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
1739         uint16_t sqb_cnt, head_off, tail_off;
1740         struct otx2_nix_tm_node *tm_node;
1741         struct otx2_eth_txq *txq;
1742         uint64_t wdata, val;
1743         int i, rc = 0;
1744
1745         otx2_tm_dbg("Disabling xmit on %s", eth_dev->data->name);
1746
1747         /* Enable CGX RXTX to drain pkts */
1748         if (!eth_dev->data->dev_started) {
1749                 otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
1750                 rc = otx2_mbox_process(dev->mbox);
1751                 if (rc)
1752                         return rc;
1753         }
1754
1755         /* XON all SMQ's */
1756         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1757                 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1758                         continue;
1759                 if (!(tm_node->flags & NIX_TM_NODE_HWRES))
1760                         continue;
1761
1762                 rc = nix_smq_xoff(dev, tm_node, false);
1763                 if (rc) {
1764                         otx2_err("Failed to enable smq %u, rc=%d",
1765                                  tm_node->hw_id, rc);
1766                         goto cleanup;
1767                 }
1768         }
1769
1770         /* Flush all tx queues */
1771         for (i = 0; i < sq_cnt; i++) {
1772                 txq = eth_dev->data->tx_queues[i];
1773
1774                 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
1775                 if (rc) {
1776                         otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
1777                         goto cleanup;
1778                 }
1779
1780                 /* Wait for sq entries to be flushed */
1781                 rc = nix_txq_flush_sq_spin(txq);
1782                 if (rc) {
1783                         otx2_err("Failed to drain sq, rc=%d", rc);
1784                         goto cleanup;
1785                 }
1786         }
1787
1788         /* XOFF & flush all SMQs. HRM mandates that all SQs
1789          * be empty before an SMQ flush is issued.
1790          */
1791         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1792                 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1793                         continue;
1794                 if (!(tm_node->flags & NIX_TM_NODE_HWRES))
1795                         continue;
1796
1797                 rc = nix_smq_xoff(dev, tm_node, true);
1798                 if (rc) {
1799                         otx2_err("Failed to disable smq %u, rc=%d",
1800                                  tm_node->hw_id, rc);
1801                         goto cleanup;
1802                 }
1803         }
1804
1805         /* Verify sanity of all tx queues */
1806         for (i = 0; i < sq_cnt; i++) {
1807                 txq = eth_dev->data->tx_queues[i];
1808
1809                 wdata = ((uint64_t)txq->sq << 32);
1810                 val = otx2_atomic64_add_nosync(wdata,
1811                                (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS));
1812
1813                 sqb_cnt = val & 0xFFFF;
1814                 head_off = (val >> 20) & 0x3F;
1815                 tail_off = (val >> 28) & 0x3F;
1816
1817                 if (sqb_cnt > 1 || head_off != tail_off ||
1818                     (*txq->fc_mem != txq->nb_sqb_bufs))
1819                         otx2_err("Failed to gracefully flush sq %u", txq->sq);
1820         }
1821
1822 cleanup:
1823         /* restore cgx state */
1824         if (!eth_dev->data->dev_started) {
1825                 otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
1826                 rc |= otx2_mbox_process(dev->mbox);
1827         }
1828
1829         return rc;
1830 }
1831
1832 static int
1833 otx2_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
1834                           int *is_leaf, struct rte_tm_error *error)
1835 {
1836         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1837         struct otx2_nix_tm_node *tm_node;
1838
1839         if (is_leaf == NULL) {
1840                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1841                 return -EINVAL;
1842         }
1843
1844         tm_node = nix_tm_node_search(dev, node_id, true);
1845         if (node_id == RTE_TM_NODE_ID_NULL || !tm_node) {
1846                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
1847                 return -EINVAL;
1848         }
1849         if (nix_tm_is_leaf(dev, tm_node->lvl))
1850                 *is_leaf = true;
1851         else
1852                 *is_leaf = false;
1853         return 0;
1854 }
1855
1856 static int
1857 otx2_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
1858                      struct rte_tm_capabilities *cap,
1859                      struct rte_tm_error *error)
1860 {
1861         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1862         struct otx2_mbox *mbox = dev->mbox;
1863         int rc, max_nr_nodes = 0, i;
1864         struct free_rsrcs_rsp *rsp;
1865
1866         memset(cap, 0, sizeof(*cap));
1867
1868         otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
1869         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1870         if (rc) {
1871                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1872                 error->message = "unexpected fatal error";
1873                 return rc;
1874         }
1875
1876         for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
1877                 max_nr_nodes += rsp->schq[i];
1878
1879         cap->n_nodes_max = max_nr_nodes + dev->tm_leaf_cnt;
1880         /* TL1 level is reserved for PF */
1881         cap->n_levels_max = nix_tm_have_tl1_access(dev) ?
1882                                 OTX2_TM_LVL_MAX : OTX2_TM_LVL_MAX - 1;
1883         cap->non_leaf_nodes_identical = 1;
1884         cap->leaf_nodes_identical = 1;
1885
1886         /* Shaper Capabilities */
1887         cap->shaper_private_n_max = max_nr_nodes;
1888         cap->shaper_n_max = max_nr_nodes;
1889         cap->shaper_private_dual_rate_n_max = max_nr_nodes;
1890         cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1891         cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1892         cap->shaper_pkt_length_adjust_min = 0;
1893         cap->shaper_pkt_length_adjust_max = 0;
1894
1895         /* Schedule Capabilities */
1896         cap->sched_n_children_max = rsp->schq[NIX_TXSCH_LVL_MDQ];
1897         cap->sched_sp_n_priorities_max = TXSCH_TLX_SP_PRIO_MAX;
1898         cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
1899         cap->sched_wfq_n_groups_max = 1;
1900         cap->sched_wfq_weight_max = MAX_SCHED_WEIGHT;
1901
1902         cap->dynamic_update_mask =
1903                 RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
1904                 RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
1905         cap->stats_mask =
1906                 RTE_TM_STATS_N_PKTS |
1907                 RTE_TM_STATS_N_BYTES |
1908                 RTE_TM_STATS_N_PKTS_RED_DROPPED |
1909                 RTE_TM_STATS_N_BYTES_RED_DROPPED;
1910
1911         for (i = 0; i < RTE_COLORS; i++) {
1912                 cap->mark_vlan_dei_supported[i] = false;
1913                 cap->mark_ip_ecn_tcp_supported[i] = false;
1914                 cap->mark_ip_dscp_supported[i] = false;
1915         }
1916
1917         return 0;
1918 }
1919
1920 static int
1921 otx2_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
1922                                    struct rte_tm_level_capabilities *cap,
1923                                    struct rte_tm_error *error)
1924 {
1925         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1926         struct otx2_mbox *mbox = dev->mbox;
1927         struct free_rsrcs_rsp *rsp;
1928         uint16_t hw_lvl;
1929         int rc;
1930
1931         memset(cap, 0, sizeof(*cap));
1932
1933         otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
1934         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1935         if (rc) {
1936                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1937                 error->message = "unexpected fatal error";
1938                 return rc;
1939         }
1940
1941         hw_lvl = nix_tm_lvl2nix(dev, lvl);
1942
1943         if (nix_tm_is_leaf(dev, lvl)) {
1944                 /* Leaf */
1945                 cap->n_nodes_max = dev->tm_leaf_cnt;
1946                 cap->n_nodes_leaf_max = dev->tm_leaf_cnt;
1947                 cap->leaf_nodes_identical = 1;
1948                 cap->leaf.stats_mask =
1949                         RTE_TM_STATS_N_PKTS |
1950                         RTE_TM_STATS_N_BYTES;
1951
1952         } else if (lvl == OTX2_TM_LVL_ROOT) {
1953                 /* Root node, aka TL2(vf)/TL1(pf) */
1954                 cap->n_nodes_max = 1;
1955                 cap->n_nodes_nonleaf_max = 1;
1956                 cap->non_leaf_nodes_identical = 1;
1957
1958                 cap->nonleaf.shaper_private_supported = true;
1959                 cap->nonleaf.shaper_private_dual_rate_supported =
1960                         nix_tm_have_tl1_access(dev) ? false : true;
1961                 cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1962                 cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1963
1964                 cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
1965                 cap->nonleaf.sched_sp_n_priorities_max =
1966                                         nix_max_prio(dev, hw_lvl) + 1;
1967                 cap->nonleaf.sched_wfq_n_groups_max = 1;
1968                 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
1969
1970                 if (nix_tm_have_tl1_access(dev))
1971                         cap->nonleaf.stats_mask =
1972                                 RTE_TM_STATS_N_PKTS_RED_DROPPED |
1973                                 RTE_TM_STATS_N_BYTES_RED_DROPPED;
1974         } else if ((lvl < OTX2_TM_LVL_MAX) &&
1975                    (hw_lvl < NIX_TXSCH_LVL_CNT)) {
1976                 /* TL2, TL3, TL4, MDQ */
1977                 cap->n_nodes_max = rsp->schq[hw_lvl];
1978                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
1979                 cap->non_leaf_nodes_identical = 1;
1980
1981                 cap->nonleaf.shaper_private_supported = true;
1982                 cap->nonleaf.shaper_private_dual_rate_supported = true;
1983                 cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1984                 cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1985
1986                 /* MDQ doesn't support Strict Priority */
1987                 if (hw_lvl == NIX_TXSCH_LVL_MDQ)
1988                         cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
1989                 else
1990                         cap->nonleaf.sched_n_children_max =
1991                                 rsp->schq[hw_lvl - 1];
1992                 cap->nonleaf.sched_sp_n_priorities_max =
1993                         nix_max_prio(dev, hw_lvl) + 1;
1994                 cap->nonleaf.sched_wfq_n_groups_max = 1;
1995                 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
1996         } else {
1997                 /* unsupported level */
1998                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                 error->message = "unsupported level";
1999                 return -EINVAL;
2000         }
2001         return 0;
2002 }
2003
2004 static int
2005 otx2_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
2006                           struct rte_tm_node_capabilities *cap,
2007                           struct rte_tm_error *error)
2008 {
2009         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2010         struct otx2_mbox *mbox = dev->mbox;
2011         struct otx2_nix_tm_node *tm_node;
2012         struct free_rsrcs_rsp *rsp;
2013         int rc, hw_lvl, lvl;
2014
2015         memset(cap, 0, sizeof(*cap));
2016
2017         tm_node = nix_tm_node_search(dev, node_id, true);
2018         if (!tm_node) {
2019                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2020                 error->message = "no such node";
2021                 return -EINVAL;
2022         }
2023
2024         hw_lvl = tm_node->hw_lvl;
2025         lvl = tm_node->lvl;
2026
2027         /* Leaf node */
2028         if (nix_tm_is_leaf(dev, lvl)) {
2029                 cap->stats_mask = RTE_TM_STATS_N_PKTS |
2030                                         RTE_TM_STATS_N_BYTES;
2031                 return 0;
2032         }
2033
2034         otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
2035         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
2036         if (rc) {
2037                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2038                 error->message = "unexpected fatal error";
2039                 return rc;
2040         }
2041
2042         /* Non Leaf Shaper */
2043         cap->shaper_private_supported = true;
2044         cap->shaper_private_dual_rate_supported =
2045                 (hw_lvl == NIX_TXSCH_LVL_TL1) ? false : true;
2046         cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
2047         cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
2048
2049         /* Non Leaf Scheduler */
2050         if (hw_lvl == NIX_TXSCH_LVL_MDQ)
2051                 cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
2052         else
2053                 cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
2054
2055         cap->nonleaf.sched_sp_n_priorities_max = nix_max_prio(dev, hw_lvl) + 1;
2056         cap->nonleaf.sched_wfq_n_children_per_group_max =
2057                 cap->nonleaf.sched_n_children_max;
2058         cap->nonleaf.sched_wfq_n_groups_max = 1;
2059         cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
2060
2061         if (hw_lvl == NIX_TXSCH_LVL_TL1)
2062                 cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
2063                         RTE_TM_STATS_N_BYTES_RED_DROPPED;
2064         return 0;
2065 }
2066
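/*
 * rte_tm shaper rates arrive in bytes/sec and are converted to bits/sec
 * below before being stored. An illustrative application-side call
 * (profile id and values are examples only):
 *
 *     struct rte_tm_shaper_params sp = {
 *             .peak = {
 *                     .rate = 125000000, // 1 Gbit/s in bytes/sec
 *                     .size = 8192,      // burst in bytes
 *             },
 *     };
 *     struct rte_tm_error err;
 *     rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
 */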
2067 static int
2068 otx2_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev,
2069                                uint32_t profile_id,
2070                                struct rte_tm_shaper_params *params,
2071                                struct rte_tm_error *error)
2072 {
2073         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2074         struct otx2_nix_tm_shaper_profile *profile;
2075
2076         profile = nix_tm_shaper_profile_search(dev, profile_id);
2077         if (profile) {
2078                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2079                 error->message = "shaper profile ID already exists";
2080                 return -EINVAL;
2081         }
2082
2083         /* Committed rate and burst size can be enabled/disabled */
2084         if (params->committed.size || params->committed.rate) {
2085                 if (params->committed.size < MIN_SHAPER_BURST ||
2086                     params->committed.size > MAX_SHAPER_BURST) {
2087                         error->type =
2088                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
2089                         return -EINVAL;
2090                 } else if (!shaper_rate_to_nix(params->committed.rate * 8,
2091                                                NULL, NULL, NULL)) {
2092                         error->type =
2093                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
2094                         error->message = "shaper committed rate invalid";
2095                         return -EINVAL;
2096                 }
2097         }
2098
2099         /* Peak rate and burst size can be enabled/disabled */
2100         if (params->peak.size || params->peak.rate) {
2101                 if (params->peak.size < MIN_SHAPER_BURST ||
2102                     params->peak.size > MAX_SHAPER_BURST) {
2103                         error->type =
2104                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
2105                         return -EINVAL;
2106                 } else if (!shaper_rate_to_nix(params->peak.rate * 8,
2107                                                NULL, NULL, NULL)) {
2108                         error->type =
2109                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
2110                         error->message = "shaper peak rate invalid";
2111                         return -EINVAL;
2112                 }
2113         }
2114
2115         profile = rte_zmalloc("otx2_nix_tm_shaper_profile",
2116                               sizeof(struct otx2_nix_tm_shaper_profile), 0);
2117         if (!profile)
2118                 return -ENOMEM;
2119
2120         profile->shaper_profile_id = profile_id;
2121         rte_memcpy(&profile->params, params,
2122                    sizeof(struct rte_tm_shaper_params));
2123         TAILQ_INSERT_TAIL(&dev->shaper_profile_list, profile, shaper);
2124
2125         otx2_tm_dbg("Added TM shaper profile %u, "
2126                     "pir %" PRIu64 ", pbs %" PRIu64 ", cir %" PRIu64
2127                     ", cbs %" PRIu64 ", adj %u",
2128                     profile_id,
2129                     params->peak.rate * 8,
2130                     params->peak.size,
2131                     params->committed.rate * 8,
2132                     params->committed.size,
2133                     params->pkt_length_adjust);
2134
2135         /* Translate rate as bits per second */
2136         profile->params.peak.rate = profile->params.peak.rate * 8;
2137         profile->params.committed.rate = profile->params.committed.rate * 8;
2138         /* Always use PIR for single rate shaping */
2139         if (!params->peak.rate && params->committed.rate) {
2140                 profile->params.peak = profile->params.committed;
2141                 memset(&profile->params.committed, 0,
2142                        sizeof(profile->params.committed));
2143         }
2144
2145         /* update min rate */
2146         nix_tm_shaper_profile_update_min(dev);
2147         return 0;
2148 }
2149
2150 static int
2151 otx2_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
2152                                   uint32_t profile_id,
2153                                   struct rte_tm_error *error)
2154 {
2155         struct otx2_nix_tm_shaper_profile *profile;
2156         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2157
2158         profile = nix_tm_shaper_profile_search(dev, profile_id);
2159
2160         if (!profile) {
2161                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2162                 error->message = "shaper profile ID does not exist";
2163                 return -EINVAL;
2164         }
2165
2166         if (profile->reference_count) {
2167                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
2168                 error->message = "shaper profile in use";
2169                 return -EINVAL;
2170         }
2171
2172         otx2_tm_dbg("Removing TM shaper profile %u", profile_id);
2173         TAILQ_REMOVE(&dev->shaper_profile_list, profile, shaper);
2174         rte_free(profile);
2175
2176         /* update min rate */
2177         nix_tm_shaper_profile_update_min(dev);
2178         return 0;
2179 }
2180
2181 static int
2182 otx2_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
2183                      uint32_t parent_node_id, uint32_t priority,
2184                      uint32_t weight, uint32_t lvl,
2185                      struct rte_tm_node_params *params,
2186                      struct rte_tm_error *error)
2187 {
2188         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2189         struct otx2_nix_tm_node *parent_node;
2190         int rc, clear_on_fail = 0;
2191         uint32_t exp_next_lvl;
2192         uint16_t hw_lvl;
2193
2194         /* we don't support dynamic updates */
2195         if (dev->tm_flags & NIX_TM_COMMITTED) {
2196                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2197                 error->message = "dynamic update not supported";
2198                 return -EIO;
2199         }
2200
2201         /* Leaf nodes must all have priority 0 */
2202         if (nix_tm_is_leaf(dev, lvl) && priority != 0) {
2203                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2204                 error->message = "queue shapers must be priority 0";
2205                 return -EIO;
2206         }
2207
2208         parent_node = nix_tm_node_search(dev, parent_node_id, true);
2209
2210         /* find the right level */
2211         if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
2212                 if (parent_node_id == RTE_TM_NODE_ID_NULL) {
2213                         lvl = OTX2_TM_LVL_ROOT;
2214                 } else if (parent_node) {
2215                         lvl = parent_node->lvl + 1;
2216                 } else {
2217                         /* Neither a valid parent nor a valid level id given */
2218                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2219                         error->message = "invalid parent node id";
2220                         return -ERANGE;
2221                 }
2222         }
2223
2224         /* Translate rte_tm level id's to nix hw level id's */
2225         hw_lvl = nix_tm_lvl2nix(dev, lvl);
2226         if (hw_lvl == NIX_TXSCH_LVL_CNT &&
2227             !nix_tm_is_leaf(dev, lvl)) {
2228                 error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
2229                 error->message = "invalid level id";
2230                 return -ERANGE;
2231         }
2232
2233         if (node_id < dev->tm_leaf_cnt)
2234                 exp_next_lvl = NIX_TXSCH_LVL_SMQ;
2235         else
2236                 exp_next_lvl = hw_lvl + 1;
2237
2238         /* Check that a valid parent exists at the expected level */
2239         if (hw_lvl != dev->otx2_tm_root_lvl &&
2240             (!parent_node || parent_node->hw_lvl != exp_next_lvl)) {
2241                 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2242                 error->message = "invalid parent node id";
2243                 return -EINVAL;
2244         }
2245
2246         /* Check if a node already exists */
2247         if (nix_tm_node_search(dev, node_id, true)) {
2248                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2249                 error->message = "node already exists";
2250                 return -EINVAL;
2251         }
2252
2253         /* Check if shaper profile exists for non leaf node */
2254         if (!nix_tm_is_leaf(dev, lvl) &&
2255             params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE &&
2256             !nix_tm_shaper_profile_search(dev, params->shaper_profile_id)) {
2257                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2258                 error->message = "invalid shaper profile";
2259                 return -EINVAL;
2260         }
2261
2262         /* Reject a second DWRR group among siblings and holes in priorities */
2263         if (validate_prio(dev, lvl, parent_node_id, priority, error))
2264                 return -EINVAL;
2265
2266         if (weight > MAX_SCHED_WEIGHT) {
2267                 error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
2268                 error->message = "max weight exceeded";
2269                 return -EINVAL;
2270         }
2271
2272         rc = nix_tm_node_add_to_list(dev, node_id, parent_node_id,
2273                                      priority, weight, hw_lvl,
2274                                      lvl, true, params);
2275         if (rc) {
2276                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2277                 /* cleanup user added nodes */
2278                 if (clear_on_fail)
2279                         nix_tm_free_resources(dev, NIX_TM_NODE_USER,
2280                                               NIX_TM_NODE_USER, false);
2281                 error->message = "failed to add node";
2282                 return rc;
2283         }
2284         error->type = RTE_TM_ERROR_TYPE_NONE;
2285         return 0;
2286 }
2287
2288 static int
2289 otx2_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id,
2290                         struct rte_tm_error *error)
2291 {
2292         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2293         struct otx2_nix_tm_node *tm_node, *child_node;
2294         struct otx2_nix_tm_shaper_profile *profile;
2295         uint32_t profile_id;
2296
2297         /* we don't support dynamic updates yet */
2298         if (dev->tm_flags & NIX_TM_COMMITTED) {
2299                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2300                 error->message = "hierarchy exists";
2301                 return -EIO;
2302         }
2303
2304         if (node_id == RTE_TM_NODE_ID_NULL) {
2305                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2306                 error->message = "invalid node id";
2307                 return -EINVAL;
2308         }
2309
2310         tm_node = nix_tm_node_search(dev, node_id, true);
2311         if (!tm_node) {
2312                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2313                 error->message = "no such node";
2314                 return -EINVAL;
2315         }
2316
2317         /* Check for any existing children */
2318         TAILQ_FOREACH(child_node, &dev->node_list, node) {
2319                 if (child_node->parent == tm_node) {
2320                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2321                         error->message = "children exist";
2322                         return -EINVAL;
2323                 }
2324         }
2325
2326         /* Remove shaper profile reference */
2327         profile_id = tm_node->params.shaper_profile_id;
2328         profile = nix_tm_shaper_profile_search(dev, profile_id);
         if (profile)
2329                 profile->reference_count--;
2330
2331         TAILQ_REMOVE(&dev->node_list, tm_node, node);
2332         rte_free(tm_node);
2333         return 0;
2334 }
2335
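/*
 * Suspend or resume a node by toggling its SW_XOFF register through the
 * mailbox; the ENABLED flag is updated only if HW accepts the change.
 */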
2336 static int
2337 nix_tm_node_suspend_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
2338                            struct rte_tm_error *error, bool suspend)
2339 {
2340         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2341         struct otx2_mbox *mbox = dev->mbox;
2342         struct otx2_nix_tm_node *tm_node;
2343         struct nix_txschq_config *req;
2344         uint16_t flags;
2345         int rc;
2346
2347         tm_node = nix_tm_node_search(dev, node_id, true);
2348         if (!tm_node) {
2349                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2350                 error->message = "no such node";
2351                 return -EINVAL;
2352         }
2353
2354         if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
2355                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2356                 error->message = "hierarchy doesn't exist";
2357                 return -EINVAL;
2358         }
2359
2360         flags = tm_node->flags;
2361         flags = suspend ? (flags & ~NIX_TM_NODE_ENABLED) :
2362                 (flags | NIX_TM_NODE_ENABLED);
2363
2364         if (tm_node->flags == flags)
2365                 return 0;
2366
2367         /* send mbox for state change */
2368         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2369
2370         req->lvl = tm_node->hw_lvl;
2371         req->num_regs = prepare_tm_sw_xoff(tm_node, suspend,
2372                                            req->reg, req->regval);
2373         rc = send_tm_reqval(mbox, req, error);
2374         if (!rc)
2375                 tm_node->flags = flags;
2376         return rc;
2377 }
2378
2379 static int
2380 otx2_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id,
2381                          struct rte_tm_error *error)
2382 {
2383         return nix_tm_node_suspend_resume(eth_dev, node_id, error, true);
2384 }
2385
2386 static int
2387 otx2_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
2388                         struct rte_tm_error *error)
2389 {
2390         return nix_tm_node_suspend_resume(eth_dev, node_id, error, false);
2391 }
2392
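/*
 * Commit the user-built hierarchy: verify every leaf (SQ) is present,
 * disable transmission, tear down any default/rate-limit tree and
 * earlier user resources, then allocate and program the new topology
 * with xmit enabled.
 */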
2393 static int
2394 otx2_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
2395                              int clear_on_fail,
2396                              struct rte_tm_error *error)
2397 {
2398         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2399         struct otx2_nix_tm_node *tm_node;
2400         uint32_t leaf_cnt = 0;
2401         int rc;
2402
2403         if (dev->tm_flags & NIX_TM_COMMITTED) {
2404                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2405                 error->message = "hierarchy exists";
2406                 return -EINVAL;
2407         }
2408
2409         /* Check if we have all the leaf nodes */
2410         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
2411                 if (tm_node->flags & NIX_TM_NODE_USER &&
2412                     tm_node->id < dev->tm_leaf_cnt)
2413                         leaf_cnt++;
2414         }
2415
2416         if (leaf_cnt != dev->tm_leaf_cnt) {
2417                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2418                 error->message = "incomplete hierarchy";
2419                 return -EINVAL;
2420         }
2421
2422         /*
2423          * Disable xmit; it will be re-enabled once the
2424          * new topology is committed.
2425          */
2426         rc = nix_xmit_disable(eth_dev);
2427         if (rc) {
2428                 otx2_err("failed to disable TX, rc=%d", rc);
2429                 return -EIO;
2430         }
2431
2432         /* Delete default/ratelimit tree */
2433         if (dev->tm_flags & (NIX_TM_DEFAULT_TREE | NIX_TM_RATE_LIMIT_TREE)) {
2434                 rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false);
2435                 if (rc) {
2436                         error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2437                         error->message = "failed to free default resources";
2438                         return rc;
2439                 }
2440                 dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE |
2441                                    NIX_TM_RATE_LIMIT_TREE);
2442         }
2443
2444         /* Free up user-allocated resources */
2445         rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER,
2446                                    NIX_TM_NODE_USER, true);
2447         if (rc) {
2448                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2449                 error->message = "failed to free user resources";
2450                 return rc;
2451         }
2452
2453         rc = nix_tm_alloc_resources(eth_dev, true);
2454         if (rc) {
2455                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2456                 error->message = "alloc resources failed";
2457                 /* TODO should we restore default config ? */
2458                 if (clear_on_fail)
2459                         nix_tm_free_resources(dev, 0, 0, false);
2460                 return rc;
2461         }
2462
2463         error->type = RTE_TM_ERROR_TYPE_NONE;
2464         dev->tm_flags |= NIX_TM_COMMITTED;
2465         return 0;
2466 }
2467
2468 static int
2469 otx2_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev,
2470                                uint32_t node_id,
2471                                uint32_t profile_id,
2472                                struct rte_tm_error *error)
2473 {
2474         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2475         struct otx2_nix_tm_shaper_profile *profile = NULL;
2476         struct otx2_mbox *mbox = dev->mbox;
2477         struct otx2_nix_tm_node *tm_node;
2478         struct nix_txschq_config *req;
2479         uint8_t k;
2480         int rc;
2481
2482         tm_node = nix_tm_node_search(dev, node_id, true);
2483         if (!tm_node || nix_tm_is_leaf(dev, tm_node->lvl)) {
2484                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2485                 error->message = "invalid node";
2486                 return -EINVAL;
2487         }
2488
2489         if (profile_id == tm_node->params.shaper_profile_id)
2490                 return 0;
2491
2492         if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
2493                 profile = nix_tm_shaper_profile_search(dev, profile_id);
2494                 if (!profile) {
2495                         error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2496                         error->message = "shaper profile ID does not exist";
2497                         return -EINVAL;
2498                 }
2499         }
2500
2501         tm_node->params.shaper_profile_id = profile_id;
2502
2503         /* Nothing to do if not yet committed */
2504         if (!(dev->tm_flags & NIX_TM_COMMITTED))
2505                 return 0;
2506
2507         tm_node->flags &= ~NIX_TM_NODE_ENABLED;
2508
2509         /* Flush the specific node with SW_XOFF */
2510         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2511         req->lvl = tm_node->hw_lvl;
2512         k = prepare_tm_sw_xoff(tm_node, true, req->reg, req->regval);
2513         req->num_regs = k;
2514
2515         rc = send_tm_reqval(mbox, req, error);
2516         if (rc)
2517                 return rc;
2518
2519         shaper_default_red_algo(dev, tm_node, profile);
2520
2521         /* Update the PIR/CIR and clear SW XOFF */
2522         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2523         req->lvl = tm_node->hw_lvl;
2524
2525         k = prepare_tm_shaper_reg(tm_node, profile, req->reg, req->regval);
2526
2527         k += prepare_tm_sw_xoff(tm_node, false, &req->reg[k], &req->regval[k]);
2528
2529         req->num_regs = k;
2530         rc = send_tm_reqval(mbox, req, error);
2531         if (!rc)
2532                 tm_node->flags |= NIX_TM_NODE_ENABLED;
2533         return rc;
2534 }
2535
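/*
 * Only a DWRR weight change is supported here. For non-leaf nodes the
 * update is applied under SW_XOFF: the parent and all siblings are
 * XOFFed, the scheduling register of this node is rewritten with the
 * new weight, and everything is XONed again. Leaf (SQ) weights are
 * updated directly in the SQ context.
 */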
2536 static int
2537 otx2_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev,
2538                                uint32_t node_id, uint32_t new_parent_id,
2539                                uint32_t priority, uint32_t weight,
2540                                struct rte_tm_error *error)
2541 {
2542         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2543         struct otx2_nix_tm_node *tm_node, *sibling;
2544         struct otx2_nix_tm_node *new_parent;
2545         struct nix_txschq_config *req;
2546         uint8_t k;
2547         int rc;
2548
2549         if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
2550                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2551                 error->message = "hierarchy doesn't exist";
2552                 return -EINVAL;
2553         }
2554
2555         tm_node = nix_tm_node_search(dev, node_id, true);
2556         if (!tm_node) {
2557                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2558                 error->message = "no such node";
2559                 return -EINVAL;
2560         }
2561
2562         /* Parent id is valid only for non-root nodes */
2563         if (tm_node->hw_lvl != dev->otx2_tm_root_lvl) {
2564                 new_parent = nix_tm_node_search(dev, new_parent_id, true);
2565                 if (!new_parent) {
2566                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2567                         error->message = "no such parent node";
2568                         return -EINVAL;
2569                 }
2570
2571                 /* Current support is only for dynamic weight update */
2572                 if (tm_node->parent != new_parent ||
2573                     tm_node->priority != priority) {
2574                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2575                         error->message = "only weight update supported";
2576                         return -EINVAL;
2577                 }
2578         }
2579
2580         /* Skip if no change */
2581         if (tm_node->weight == weight)
2582                 return 0;
2583
2584         tm_node->weight = weight;
2585
2586         /* For leaf nodes, SQ CTX needs update */
2587         if (nix_tm_is_leaf(dev, tm_node->lvl)) {
2588                 /* Update SQ quantum data on the fly */
2589                 rc = nix_sq_sched_data(dev, tm_node, true);
2590                 if (rc) {
2591                         error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2592                         error->message = "sq sched data update failed";
2593                         return rc;
2594                 }
2595         } else {
2596                 /* XOFF Parent node */
2597                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2598                 req->lvl = tm_node->parent->hw_lvl;
2599                 req->num_regs = prepare_tm_sw_xoff(tm_node->parent, true,
2600                                                    req->reg, req->regval);
2601                 rc = send_tm_reqval(dev->mbox, req, error);
2602                 if (rc)
2603                         return rc;
2604
2605                 /* XOFF this node and all other siblings */
2606                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2607                 req->lvl = tm_node->hw_lvl;
2608
2609                 k = 0;
2610                 TAILQ_FOREACH(sibling, &dev->node_list, node) {
2611                         if (sibling->parent != tm_node->parent)
2612                                 continue;
2613                         k += prepare_tm_sw_xoff(sibling, true, &req->reg[k],
2614                                                 &req->regval[k]);
2615                 }
2616                 req->num_regs = k;
2617                 rc = send_tm_reqval(dev->mbox, req, error);
2618                 if (rc)
2619                         return rc;
2620
2621                 /* Update new weight for current node */
2622                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2623                 req->lvl = tm_node->hw_lvl;
2624                 req->num_regs = prepare_tm_sched_reg(dev, tm_node,
2625                                                      req->reg, req->regval);
2626                 rc = send_tm_reqval(dev->mbox, req, error);
2627                 if (rc)
2628                         return rc;
2629
2630                 /* XON this node and all other siblings */
2631                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2632                 req->lvl = tm_node->hw_lvl;
2633
2634                 k = 0;
2635                 TAILQ_FOREACH(sibling, &dev->node_list, node) {
2636                         if (sibling->parent != tm_node->parent)
2637                                 continue;
2638                         k += prepare_tm_sw_xoff(sibling, false, &req->reg[k],
2639                                                 &req->regval[k]);
2640                 }
2641                 req->num_regs = k;
2642                 rc = send_tm_reqval(dev->mbox, req, error);
2643                 if (rc)
2644                         return rc;
2645
2646                 /* XON Parent node */
2647                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2648                 req->lvl = tm_node->parent->hw_lvl;
2649                 req->num_regs = prepare_tm_sw_xoff(tm_node->parent, false,
2650                                                    req->reg, req->regval);
2651                 rc = send_tm_reqval(dev->mbox, req, error);
2652                 if (rc)
2653                         return rc;
2654         }
2655         return 0;
2656 }
2657
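/*
 * Leaf (SQ) counters are read with an atomic add of zero to the LF
 * operation registers, selecting the SQ via the upper 32 bits of the
 * operand; TL1 RED drop counters are read through the AF mailbox.
 * Counters are returned relative to the last cleared snapshot.
 */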
2658 static int
2659 otx2_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
2660                             struct rte_tm_node_stats *stats,
2661                             uint64_t *stats_mask, int clear,
2662                             struct rte_tm_error *error)
2663 {
2664         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2665         struct otx2_nix_tm_node *tm_node;
2666         uint64_t reg, val;
2667         int64_t *addr;
2668         int rc = 0;
2669
2670         tm_node = nix_tm_node_search(dev, node_id, true);
2671         if (!tm_node) {
2672                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2673                 error->message = "no such node";
2674                 return -EINVAL;
2675         }
2676
2677         /* Stats support only for leaf node or TL1 root */
2678         if (nix_tm_is_leaf(dev, tm_node->lvl)) {
2679                 reg = (((uint64_t)tm_node->id) << 32);
2680
2681                 /* Packets */
2682                 addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);
2683                 val = otx2_atomic64_add_nosync(reg, addr);
2684                 if (val & OP_ERR)
2685                         val = 0;
2686                 stats->n_pkts = val - tm_node->last_pkts;
2687
2688                 /* Bytes */
2689                 addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_OCTS);
2690                 val = otx2_atomic64_add_nosync(reg, addr);
2691                 if (val & OP_ERR)
2692                         val = 0;
2693                 stats->n_bytes = val - tm_node->last_bytes;
2694
2695                 if (clear) {
2696                         tm_node->last_pkts = stats->n_pkts;
2697                         tm_node->last_bytes = stats->n_bytes;
2698                 }
2699
2700                 *stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
2701
2702         } else if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1) {
2703                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2704                 error->message = "stats read error";
2705
2706                 /* RED Drop packets */
2707                 reg = NIX_AF_TL1X_DROPPED_PACKETS(tm_node->hw_id);
2708                 rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
2709                 if (rc)
2710                         goto exit;
2711                 stats->leaf.n_pkts_dropped[RTE_COLOR_RED] =
2712                                                 val - tm_node->last_pkts;
2713
2714                 /* RED Drop bytes */
2715                 reg = NIX_AF_TL1X_DROPPED_BYTES(tm_node->hw_id);
2716                 rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
2717                 if (rc)
2718                         goto exit;
2719                 stats->leaf.n_bytes_dropped[RTE_COLOR_RED] =
2720                                                 val - tm_node->last_bytes;
2721
2722                 /* Clear stats */
2723                 if (clear) {
2724                         tm_node->last_pkts =
2725                                 stats->leaf.n_pkts_dropped[RTE_COLOR_RED];
2726                         tm_node->last_bytes =
2727                                 stats->leaf.n_bytes_dropped[RTE_COLOR_RED];
2728                 }
2729
2730                 *stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
2731                         RTE_TM_STATS_N_BYTES_RED_DROPPED;
2732
2733         } else {
2734                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2735                 error->message = "unsupported node";
2736                 rc = -EINVAL;
2737         }
2738
2739 exit:
2740         return rc;
2741 }
2742
2743 const struct rte_tm_ops otx2_tm_ops = {
2744         .node_type_get = otx2_nix_tm_node_type_get,
2745
2746         .capabilities_get = otx2_nix_tm_capa_get,
2747         .level_capabilities_get = otx2_nix_tm_level_capa_get,
2748         .node_capabilities_get = otx2_nix_tm_node_capa_get,
2749
2750         .shaper_profile_add = otx2_nix_tm_shaper_profile_add,
2751         .shaper_profile_delete = otx2_nix_tm_shaper_profile_delete,
2752
2753         .node_add = otx2_nix_tm_node_add,
2754         .node_delete = otx2_nix_tm_node_delete,
2755         .node_suspend = otx2_nix_tm_node_suspend,
2756         .node_resume = otx2_nix_tm_node_resume,
2757         .hierarchy_commit = otx2_nix_tm_hierarchy_commit,
2758
2759         .node_shaper_update = otx2_nix_tm_node_shaper_update,
2760         .node_parent_update = otx2_nix_tm_node_parent_update,
2761         .node_stats_read = otx2_nix_tm_node_stats_read,
2762 };
2763
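/*
 * Default tree: one node per level, all SQs under a single SMQ:
 *
 *   With TL1 access:    TL1 -> TL2 -> TL3 -> TL4 -> SMQ -> SQ0..SQn
 *   Without TL1 access:        TL2 -> TL3 -> TL4 -> SMQ -> SQ0..SQn
 */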
2764 static int
2765 nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)
2766 {
2767         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2768         uint32_t def = eth_dev->data->nb_tx_queues;
2769         struct rte_tm_node_params params;
2770         uint32_t leaf_parent, i;
2771         int rc = 0, leaf_level;
2772
2773         /* Default params */
2774         memset(&params, 0, sizeof(params));
2775         params.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
2776
2777         if (nix_tm_have_tl1_access(dev)) {
2778                 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
2779                 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2780                                              DEFAULT_RR_WEIGHT,
2781                                              NIX_TXSCH_LVL_TL1,
2782                                              OTX2_TM_LVL_ROOT, false, &params);
2783                 if (rc)
2784                         goto exit;
2785                 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2786                                              DEFAULT_RR_WEIGHT,
2787                                              NIX_TXSCH_LVL_TL2,
2788                                              OTX2_TM_LVL_SCH1, false, &params);
2789                 if (rc)
2790                         goto exit;
2791
2792                 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2793                                              DEFAULT_RR_WEIGHT,
2794                                              NIX_TXSCH_LVL_TL3,
2795                                              OTX2_TM_LVL_SCH2, false, &params);
2796                 if (rc)
2797                         goto exit;
2798
2799                 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2800                                              DEFAULT_RR_WEIGHT,
2801                                              NIX_TXSCH_LVL_TL4,
2802                                              OTX2_TM_LVL_SCH3, false, &params);
2803                 if (rc)
2804                         goto exit;
2805
2806                 rc = nix_tm_node_add_to_list(dev, def + 4, def + 3, 0,
2807                                              DEFAULT_RR_WEIGHT,
2808                                              NIX_TXSCH_LVL_SMQ,
2809                                              OTX2_TM_LVL_SCH4, false, &params);
2810                 if (rc)
2811                         goto exit;
2812
2813                 leaf_parent = def + 4;
2814                 leaf_level = OTX2_TM_LVL_QUEUE;
2815         } else {
2816                 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
2817                 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2818                                              DEFAULT_RR_WEIGHT,
2819                                              NIX_TXSCH_LVL_TL2,
2820                                              OTX2_TM_LVL_ROOT, false, &params);
2821                 if (rc)
2822                         goto exit;
2823
2824                 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2825                                              DEFAULT_RR_WEIGHT,
2826                                              NIX_TXSCH_LVL_TL3,
2827                                              OTX2_TM_LVL_SCH1, false, &params);
2828                 if (rc)
2829                         goto exit;
2830
2831                 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2832                                              DEFAULT_RR_WEIGHT,
2833                                              NIX_TXSCH_LVL_TL4,
2834                                              OTX2_TM_LVL_SCH2, false, &params);
2835                 if (rc)
2836                         goto exit;
2837
2838                 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2839                                              DEFAULT_RR_WEIGHT,
2840                                              NIX_TXSCH_LVL_SMQ,
2841                                              OTX2_TM_LVL_SCH3, false, &params);
2842                 if (rc)
2843                         goto exit;
2844
2845                 leaf_parent = def + 3;
2846                 leaf_level = OTX2_TM_LVL_SCH4;
2847         }
2848
2849         /* Add leaf nodes */
2850         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2851                 rc = nix_tm_node_add_to_list(dev, i, leaf_parent, 0,
2852                                              DEFAULT_RR_WEIGHT,
2853                                              NIX_TXSCH_LVL_CNT,
2854                                              leaf_level, false, &params);
2855                 if (rc)
2856                         break;
2857         }
2858
2859 exit:
2860         return rc;
2861 }
2862
2863 void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev)
2864 {
2865         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2866
2867         TAILQ_INIT(&dev->node_list);
2868         TAILQ_INIT(&dev->shaper_profile_list);
2869         dev->tm_rate_min = 1E9; /* 1Gbps */
2870 }
2871
2872 int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
2873 {
2874         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2875         struct otx2_eth_dev  *dev = otx2_eth_pmd_priv(eth_dev);
2876         uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
2877         int rc;
2878
2879         /* Free up all resources already held */
2880         rc = nix_tm_free_resources(dev, 0, 0, false);
2881         if (rc) {
2882                 otx2_err("Failed to free up existing resources, rc=%d", rc);
2883                 return rc;
2884         }
2885
2886         /* Clear shaper profiles */
2887         nix_tm_clear_shaper_profiles(dev);
2888         dev->tm_flags = NIX_TM_DEFAULT_TREE;
2889
2890         /* Disable TL1 static priority when VFs are enabled:
2891          * otherwise the VFs' TL2 nodes would need to be
2892          * reallocated at runtime to match a specific PF topology.
2893          */
2894         if (pci_dev->max_vfs)
2895                 dev->tm_flags |= NIX_TM_TL1_NO_SP;
2896
	rc = nix_tm_prepare_default_tree(eth_dev);
	if (rc != 0)
		return rc;

	rc = nix_tm_alloc_resources(eth_dev, false);
	if (rc != 0)
		return rc;
	dev->tm_leaf_cnt = sq_cnt;

	return 0;
}

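/*
 * Note: the default init sequence above is free old resources -> clear
 * shaper profiles -> build the SW tree -> commit it to HW via
 * nix_tm_alloc_resources(); tm_leaf_cnt then mirrors nb_tx_queues so that
 * leaf lookups (e.g. in otx2_nix_tm_get_leaf_data()) can be bounds-checked.
 */
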
static int
nix_tm_prepare_rate_limited_tree(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint32_t def = eth_dev->data->nb_tx_queues;
	struct rte_tm_node_params params;
	uint32_t leaf_parent, i;
	int rc = 0; /* holds negative errnos, so must be signed */

	memset(&params, 0, sizeof(params));

	if (nix_tm_have_tl1_access(dev)) {
		dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
		rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
					     DEFAULT_RR_WEIGHT,
					     NIX_TXSCH_LVL_TL1,
					     OTX2_TM_LVL_ROOT, false, &params);
		if (rc)
			goto error;
		rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
					     DEFAULT_RR_WEIGHT,
					     NIX_TXSCH_LVL_TL2,
					     OTX2_TM_LVL_SCH1, false, &params);
		if (rc)
			goto error;
		rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
					     DEFAULT_RR_WEIGHT,
					     NIX_TXSCH_LVL_TL3,
					     OTX2_TM_LVL_SCH2, false, &params);
		if (rc)
			goto error;
		rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
					     DEFAULT_RR_WEIGHT,
					     NIX_TXSCH_LVL_TL4,
					     OTX2_TM_LVL_SCH3, false, &params);
		if (rc)
			goto error;
		leaf_parent = def + 3;

		/* Add per queue SMQ nodes */
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
			rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
						     leaf_parent,
						     0, DEFAULT_RR_WEIGHT,
						     NIX_TXSCH_LVL_SMQ,
						     OTX2_TM_LVL_SCH4,
						     false, &params);
			if (rc)
				goto error;
		}

		/* Add leaf nodes */
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
			rc = nix_tm_node_add_to_list(dev, i,
						     leaf_parent + 1 + i, 0,
						     DEFAULT_RR_WEIGHT,
						     NIX_TXSCH_LVL_CNT,
						     OTX2_TM_LVL_QUEUE,
						     false, &params);
			if (rc)
				goto error;
		}

		return 0;
	}

	dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
	rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
				     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL2,
				     OTX2_TM_LVL_ROOT, false, &params);
	if (rc)
		goto error;
	rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
				     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL3,
				     OTX2_TM_LVL_SCH1, false, &params);
	if (rc)
		goto error;
	rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
				     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL4,
				     OTX2_TM_LVL_SCH2, false, &params);
	if (rc)
		goto error;
	leaf_parent = def + 2;

	/* Add per queue SMQ nodes */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
					     leaf_parent,
					     0, DEFAULT_RR_WEIGHT,
					     NIX_TXSCH_LVL_SMQ,
					     OTX2_TM_LVL_SCH3,
					     false, &params);
		if (rc)
			goto error;
	}

	/* Add leaf nodes */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		rc = nix_tm_node_add_to_list(dev, i, leaf_parent + 1 + i, 0,
					     DEFAULT_RR_WEIGHT,
					     NIX_TXSCH_LVL_CNT,
					     OTX2_TM_LVL_SCH4,
					     false, &params);
		if (rc)
			break;
	}
error:
	return rc;
}

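/*
 * For illustration, the rate-limited tree prepared above gives each SQ its
 * own SMQ so a per-queue PIR shaper can be attached, e.g. for two Tx queues
 * with TL1 access:
 *
 *   TL1 -> TL2 -> TL3 -> TL4 -+-> SMQ0 -> SQ0
 *                             +-> SMQ1 -> SQ1
 *
 * Without TL1 access the chain starts at TL2 and is one level shorter.
 */
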
static int
otx2_nix_tm_rate_limit_mdq(struct rte_eth_dev *eth_dev,
			   struct otx2_nix_tm_node *tm_node,
			   uint64_t tx_rate)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_nix_tm_shaper_profile profile;
	struct otx2_mbox *mbox = dev->mbox;
	volatile uint64_t *reg, *regval;
	struct nix_txschq_config *req;
	uint16_t flags;
	uint8_t k = 0;
	int rc;

	flags = tm_node->flags;

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_MDQ;
	reg = req->reg;
	regval = req->regval;

	if (tx_rate == 0) {
		k += prepare_tm_sw_xoff(tm_node, true, &reg[k], &regval[k]);
		flags &= ~NIX_TM_NODE_ENABLED;
		goto exit;
	}

	if (!(flags & NIX_TM_NODE_ENABLED)) {
		k += prepare_tm_sw_xoff(tm_node, false, &reg[k], &regval[k]);
		flags |= NIX_TM_NODE_ENABLED;
	}

	/* Use only PIR for rate limit */
	memset(&profile, 0, sizeof(profile));
	profile.params.peak.rate = tx_rate;
	/* Minimum burst: ~4us worth of Tx bytes at tx_rate */
	profile.params.peak.size = RTE_MAX(NIX_MAX_HW_FRS,
					   (4ull * tx_rate) / (1E6 * 8));
	if (!dev->tm_rate_min || dev->tm_rate_min > tx_rate)
		dev->tm_rate_min = tx_rate;

	k += prepare_tm_shaper_reg(tm_node, &profile, &reg[k], &regval[k]);
exit:
	req->num_regs = k;
	rc = otx2_mbox_process(mbox);
	if (rc)
		return rc;

	tm_node->flags = flags;
	return 0;
}

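/*
 * Worked example for the burst size above: it is roughly 4us of traffic at
 * tx_rate, i.e. (4 * tx_rate) / (1e6 * 8) bytes:
 *
 *   tx_rate =  1 Gbps ->  4e9 / 8e6 =  500 bytes
 *   tx_rate = 10 Gbps -> 4e10 / 8e6 = 5000 bytes
 *
 * Whenever that value falls below NIX_MAX_HW_FRS, the maximum HW frame size
 * is used instead, so a single maximum-sized frame can always be sent.
 */
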
int
otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
				 uint16_t queue_idx, uint16_t tx_rate_mbps)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
	struct otx2_nix_tm_node *tm_node;
	int rc;

	/* Check for supported revisions */
	if (otx2_dev_is_95xx_Ax(dev) ||
	    otx2_dev_is_96xx_Ax(dev))
		return -EINVAL;

	if (queue_idx >= eth_dev->data->nb_tx_queues)
		return -EINVAL;

	if (!(dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
	    !(dev->tm_flags & NIX_TM_RATE_LIMIT_TREE))
		goto error;

	if ((dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
	    eth_dev->data->nb_tx_queues > 1) {
		/* For a TM topology change, the ethdev must be stopped */
		if (eth_dev->data->dev_started)
			return -EBUSY;

		/*
		 * Disable xmit here; it is re-enabled once the
		 * new topology is in place.
		 */
		rc = nix_xmit_disable(eth_dev);
		if (rc) {
			otx2_err("failed to disable TX, rc=%d", rc);
			return -EIO;
		}

		rc = nix_tm_free_resources(dev, 0, 0, false);
		if (rc < 0) {
			otx2_tm_dbg("failed to free default resources, rc=%d",
				    rc);
			return -EIO;
		}

		rc = nix_tm_prepare_rate_limited_tree(eth_dev);
		if (rc < 0) {
			otx2_tm_dbg("failed to prepare tm tree, rc=%d", rc);
			return rc;
		}

		rc = nix_tm_alloc_resources(eth_dev, true);
		if (rc != 0) {
			otx2_tm_dbg("failed to allocate tm tree, rc=%d", rc);
			return rc;
		}

		dev->tm_flags &= ~NIX_TM_DEFAULT_TREE;
		dev->tm_flags |= NIX_TM_RATE_LIMIT_TREE;
	}

	tm_node = nix_tm_node_search(dev, queue_idx, false);

	/* Check that we found a valid leaf node */
	if (!tm_node ||
	    !nix_tm_is_leaf(dev, tm_node->lvl) ||
	    !tm_node->parent ||
	    tm_node->parent->hw_id == UINT32_MAX)
		return -EIO;

	return otx2_nix_tm_rate_limit_mdq(eth_dev, tm_node->parent, tx_rate);
error:
	otx2_tm_dbg("Unsupported TM tree 0x%x", dev->tm_flags);
	return -EINVAL;
}

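/*
 * Usage sketch (illustrative only): applications reach this handler through
 * the generic ethdev API, e.g. to cap Tx queue 0 of port 0 at 100 Mbps:
 *
 *   rc = rte_eth_set_queue_rate_limit(0, 0, 100);
 *
 * Passing tx_rate_mbps == 0 removes the limit by SW-XOFFing the MDQ, as
 * handled in otx2_nix_tm_rate_limit_mdq() above.
 */
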
int
otx2_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *arg)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	if (!arg)
		return -EINVAL;

	/* Check for supported revisions */
	if (otx2_dev_is_95xx_Ax(dev) ||
	    otx2_dev_is_96xx_Ax(dev))
		return -EINVAL;

	*(const void **)arg = &otx2_tm_ops;

	return 0;
}

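/*
 * For illustration, this is the hook the rte_tm layer uses to resolve
 * &otx2_tm_ops, so an application would typically go through the generic
 * API rather than calling the ops directly, e.g.:
 *
 *   struct rte_tm_capabilities cap;
 *   struct rte_tm_error error;
 *   rc = rte_tm_capabilities_get(port_id, &cap, &error);
 */
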
int
otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	int rc;

	/* Xmit is assumed to be disabled */
	/* Free up resources already held */
	rc = nix_tm_free_resources(dev, 0, 0, false);
	if (rc) {
		otx2_err("Failed to free up existing resources, rc=%d", rc);
		return rc;
	}

	/* Clear shaper profiles */
	nix_tm_clear_shaper_profiles(dev);

	dev->tm_flags = 0;
	return 0;
}

int
otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
			  uint32_t *rr_quantum, uint16_t *smq)
{
	struct otx2_nix_tm_node *tm_node;
	int rc;

	/* 0..sq_cnt-1 are leaf nodes */
	if (sq >= dev->tm_leaf_cnt)
		return -EINVAL;

	/* Search for internal node first */
	tm_node = nix_tm_node_search(dev, sq, false);
	if (!tm_node)
		tm_node = nix_tm_node_search(dev, sq, true);

	/* Check if we found a valid leaf node */
	if (!tm_node || !nix_tm_is_leaf(dev, tm_node->lvl) ||
	    !tm_node->parent || tm_node->parent->hw_id == UINT32_MAX) {
		return -EIO;
	}

	/* Get SMQ Id of leaf node's parent */
	*smq = tm_node->parent->hw_id;
	*rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);

	rc = nix_smq_xoff(dev, tm_node->parent, false);
	if (rc)
		return rc;
	tm_node->flags |= NIX_TM_NODE_ENABLED;

	return 0;
}
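
/*
 * Caller sketch (illustrative; field names assumed from the NIX SQ context):
 * the SQ init path is the expected consumer, fetching the parent SMQ and RR
 * quantum for the queue before programming the SQ context:
 *
 *   uint32_t rr_quantum;
 *   uint16_t smq;
 *   rc = otx2_nix_tm_get_leaf_data(dev, sq, &rr_quantum, &smq);
 *   if (rc == 0) {
 *           aq->sq.smq = smq;
 *           aq->sq.smq_rr_quantum = rr_quantum;
 *   }
 *
 * The returned SMQ becomes the SQ's parent scheduler queue, and the SMQ is
 * un-XOFFed as a side effect so the queue can start transmitting.
 */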