drivers/net/octeontx2/otx2_tm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2019 Marvell International Ltd.
3  */
4
5 #include <rte_malloc.h>
6
7 #include "otx2_ethdev.h"
8 #include "otx2_tm.h"
9
10 /* Use last LVL_CNT nodes as default nodes */
11 #define NIX_DEFAULT_NODE_ID_START (RTE_TM_NODE_ID_NULL - NIX_TXSCH_LVL_CNT)
12
13 enum otx2_tm_node_level {
14         OTX2_TM_LVL_ROOT = 0,
15         OTX2_TM_LVL_SCH1,
16         OTX2_TM_LVL_SCH2,
17         OTX2_TM_LVL_SCH3,
18         OTX2_TM_LVL_SCH4,
19         OTX2_TM_LVL_QUEUE,
20         OTX2_TM_LVL_MAX,
21 };
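
/*
 * These user-visible levels are mapped onto the NIX hardware scheduler
 * levels (TL1..TL4 and SMQ) by nix_tm_lvl2nix() further below; which level
 * acts as the leaf depends on whether TL1 is accessible, see
 * nix_tm_is_leaf().
 */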
22
23 static inline
24 uint64_t shaper2regval(struct shaper_params *shaper)
25 {
26         return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
27                 (shaper->div_exp << 13) | (shaper->exponent << 9) |
28                 (shaper->mantissa << 1);
29 }
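
/*
 * As implied by the shifts above, the CIR/PIR register value packs the rate
 * mantissa at bit 1, the rate exponent at bit 9, the rate divider exponent
 * at bit 13, the burst mantissa at bit 29 and the burst exponent at bit 37;
 * callers OR in bit 0 to enable the shaper.
 */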
30
31 int
32 otx2_nix_get_link(struct otx2_eth_dev *dev)
33 {
34         int link = 13 /* SDP */;
35         uint16_t lmac_chan;
36         uint16_t map;
37
38         lmac_chan = dev->tx_chan_base;
39
40         /* CGX lmac link */
41         if (lmac_chan >= 0x800) {
42                 map = lmac_chan & 0x7FF;
43                 link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
44         } else if (lmac_chan < 0x700) {
45                 /* LBK channel */
46                 link = 12;
47         }
48
49         return link;
50 }
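
/*
 * Example of the mapping above: a CGX channel of the form
 * 0x800 | (cgx_id << 8) | (lmac_id << 4) gives link = 4 * cgx_id + lmac_id,
 * so channel 0x920 (cgx 1, lmac 2) maps to link 6; channels below 0x700 use
 * the LBK link (12) and anything in between defaults to the SDP link (13).
 */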
51
52 static uint8_t
53 nix_get_relchan(struct otx2_eth_dev *dev)
54 {
55         return dev->tx_chan_base & 0xff;
56 }
57
58 static bool
59 nix_tm_have_tl1_access(struct otx2_eth_dev *dev)
60 {
61         bool is_lbk = otx2_dev_is_lbk(dev);
62         return otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) && !is_lbk;
63 }
64
65 static bool
66 nix_tm_is_leaf(struct otx2_eth_dev *dev, int lvl)
67 {
68         if (nix_tm_have_tl1_access(dev))
69                 return (lvl == OTX2_TM_LVL_QUEUE);
70
71         return (lvl == OTX2_TM_LVL_SCH4);
72 }
73
74 static int
75 find_prio_anchor(struct otx2_eth_dev *dev, uint32_t node_id)
76 {
77         struct otx2_nix_tm_node *child_node;
78
79         TAILQ_FOREACH(child_node, &dev->node_list, node) {
80                 if (!child_node->parent)
81                         continue;
82                 if (child_node->parent->id != node_id)
83                         continue;
84                 if (child_node->priority == child_node->parent->rr_prio)
85                         continue;
86                 return child_node->hw_id - child_node->priority;
87         }
88         return 0;
89 }
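
/*
 * The prio anchor written into the *_TOPOLOGY registers is the hw id that a
 * priority-0 child would have: strict-priority children are given contiguous
 * hw ids indexed by priority (see nix_tm_assign_id_to_node()), so
 * hw_id - priority of any non-RR child recovers the base of that block.
 */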
90
91
92 static struct otx2_nix_tm_shaper_profile *
93 nix_tm_shaper_profile_search(struct otx2_eth_dev *dev, uint32_t shaper_id)
94 {
95         struct otx2_nix_tm_shaper_profile *tm_shaper_profile;
96
97         TAILQ_FOREACH(tm_shaper_profile, &dev->shaper_profile_list, shaper) {
98                 if (tm_shaper_profile->shaper_profile_id == shaper_id)
99                         return tm_shaper_profile;
100         }
101         return NULL;
102 }
103
104 static inline uint64_t
105 shaper_rate_to_nix(uint64_t value, uint64_t *exponent_p,
106                    uint64_t *mantissa_p, uint64_t *div_exp_p)
107 {
108         uint64_t div_exp, exponent, mantissa;
109
110         /* Boundary checks */
111         if (value < MIN_SHAPER_RATE ||
112             value > MAX_SHAPER_RATE)
113                 return 0;
114
115         if (value <= SHAPER_RATE(0, 0, 0)) {
116                 /* Calculate rate div_exp and mantissa using
117                  * the following formula:
118                  *
119                  * value = (2E6 * (256 + mantissa)
120                  *              / ((1 << div_exp) * 256))
121                  */
122                 div_exp = 0;
123                 exponent = 0;
124                 mantissa = MAX_RATE_MANTISSA;
125
126                 while (value < (NIX_SHAPER_RATE_CONST / (1 << div_exp)))
127                         div_exp += 1;
128
129                 while (value <
130                        ((NIX_SHAPER_RATE_CONST * (256 + mantissa)) /
131                         ((1 << div_exp) * 256)))
132                         mantissa -= 1;
133         } else {
134                 /* Calculate rate exponent and mantissa using
135                  * the following formula:
136                  *
137                  * value = (2E6 * ((256 + mantissa) << exponent)) / 256
138                  *
139                  */
140                 div_exp = 0;
141                 exponent = MAX_RATE_EXPONENT;
142                 mantissa = MAX_RATE_MANTISSA;
143
144                 while (value < (NIX_SHAPER_RATE_CONST * (1 << exponent)))
145                         exponent -= 1;
146
147                 while (value < ((NIX_SHAPER_RATE_CONST *
148                                 ((256 + mantissa) << exponent)) / 256))
149                         mantissa -= 1;
150         }
151
152         if (div_exp > MAX_RATE_DIV_EXP ||
153             exponent > MAX_RATE_EXPONENT || mantissa > MAX_RATE_MANTISSA)
154                 return 0;
155
156         if (div_exp_p)
157                 *div_exp_p = div_exp;
158         if (exponent_p)
159                 *exponent_p = exponent;
160         if (mantissa_p)
161                 *mantissa_p = mantissa;
162
163         /* Calculate real rate value */
164         return SHAPER_RATE(exponent, mantissa, div_exp);
165 }
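
/*
 * Worked example (illustrative locals), assuming NIX_SHAPER_RATE_CONST is
 * the 2E6 from the formulas above: for a 100 Mbps committed rate,
 *
 *     uint64_t exp, mant, div_exp;
 *     shaper_rate_to_nix(100000000, &exp, &mant, &div_exp);
 *
 * converges on exp = 5, mant = 144, div_exp = 0, since
 * 2E6 * (256 + 144) * 2^5 / 256 == 100000000; the reconstructed rate is
 * returned so the caller can see any rounding.
 */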
166
167 static inline uint64_t
168 shaper_burst_to_nix(uint64_t value, uint64_t *exponent_p,
169                     uint64_t *mantissa_p)
170 {
171         uint64_t exponent, mantissa;
172
173         if (value < MIN_SHAPER_BURST || value > MAX_SHAPER_BURST)
174                 return 0;
175
176         /* Calculate burst exponent and mantissa using
177          * the following formula:
178          *
179          * value = ((256 + mantissa) << (exponent + 1)) / 256
180          *
181          * where value is the burst size in bytes.
182          */
183         exponent = MAX_BURST_EXPONENT;
184         mantissa = MAX_BURST_MANTISSA;
185
186         while (value < (1ull << (exponent + 1)))
187                 exponent -= 1;
188
189         while (value < ((256 + mantissa) << (exponent + 1)) / 256)
190                 mantissa -= 1;
191
192         if (exponent > MAX_BURST_EXPONENT || mantissa > MAX_BURST_MANTISSA)
193                 return 0;
194
195         if (exponent_p)
196                 *exponent_p = exponent;
197         if (mantissa_p)
198                 *mantissa_p = mantissa;
199
200         return SHAPER_BURST(exponent, mantissa);
201 }
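
/*
 * Worked example: a burst size of 1024 bytes converges on exponent = 9,
 * mantissa = 0, since ((256 + 0) << (9 + 1)) / 256 == 1024, and that
 * SHAPER_BURST() value is what gets returned.
 */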
202
203 static void
204 shaper_config_to_nix(struct otx2_nix_tm_shaper_profile *profile,
205                      struct shaper_params *cir,
206                      struct shaper_params *pir)
207 {
208         struct rte_tm_shaper_params *param;
209         if (!profile)
210                 return;
211         param = &profile->params;
212 
213         /* Calculate CIR exponent and mantissa */
214         if (param->committed.rate)
215                 cir->rate = shaper_rate_to_nix(param->committed.rate,
216                                                &cir->exponent,
217                                                &cir->mantissa,
218                                                &cir->div_exp);
219
220         /* Calculate PIR exponent and mantissa */
221         if (param->peak.rate)
222                 pir->rate = shaper_rate_to_nix(param->peak.rate,
223                                                &pir->exponent,
224                                                &pir->mantissa,
225                                                &pir->div_exp);
226
227         /* Calculate CIR burst exponent and mantissa */
228         if (param->committed.size)
229                 cir->burst = shaper_burst_to_nix(param->committed.size,
230                                                  &cir->burst_exponent,
231                                                  &cir->burst_mantissa);
232
233         /* Calculate PIR burst exponent and mantissa */
234         if (param->peak.size)
235                 pir->burst = shaper_burst_to_nix(param->peak.size,
236                                                  &pir->burst_exponent,
237                                                  &pir->burst_mantissa);
238 }
239
240 static void
241 shaper_default_red_algo(struct otx2_eth_dev *dev,
242                         struct otx2_nix_tm_node *tm_node,
243                         struct otx2_nix_tm_shaper_profile *profile)
244 {
245         struct shaper_params cir, pir;
246
247         /* C0 doesn't support STALL when both PIR & CIR are enabled */
248         if (profile && otx2_dev_is_96xx_Cx(dev)) {
249                 memset(&cir, 0, sizeof(cir));
250                 memset(&pir, 0, sizeof(pir));
251                 shaper_config_to_nix(profile, &cir, &pir);
252
253                 if (pir.rate && cir.rate) {
254                         tm_node->red_algo = NIX_REDALG_DISCARD;
255                         tm_node->flags |= NIX_TM_NODE_RED_DISCARD;
256                         return;
257                 }
258         }
259
260         tm_node->red_algo = NIX_REDALG_STD;
261         tm_node->flags &= ~NIX_TM_NODE_RED_DISCARD;
262 }
263
264 static int
265 populate_tm_tl1_default(struct otx2_eth_dev *dev, uint32_t schq)
266 {
267         struct otx2_mbox *mbox = dev->mbox;
268         struct nix_txschq_config *req;
269
270         /*
271          * Default config for TL1.
272          * For VF this is always ignored.
273          */
274
275         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
276         req->lvl = NIX_TXSCH_LVL_TL1;
277
278         /* Set DWRR quantum */
279         req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
280         req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
281         req->num_regs++;
282
283         req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
284         req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
285         req->num_regs++;
286
287         req->reg[2] = NIX_AF_TL1X_CIR(schq);
288         req->regval[2] = 0;
289         req->num_regs++;
290
291         return otx2_mbox_process(mbox);
292 }
293
294 static uint8_t
295 prepare_tm_sched_reg(struct otx2_eth_dev *dev,
296                      struct otx2_nix_tm_node *tm_node,
297                      volatile uint64_t *reg, volatile uint64_t *regval)
298 {
299         uint64_t strict_prio = tm_node->priority;
300         uint32_t hw_lvl = tm_node->hw_lvl;
301         uint32_t schq = tm_node->hw_id;
302         uint64_t rr_quantum;
303         uint8_t k = 0;
304
305         rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
306
307         /* For children of the root, strict priority is the default when
308          * either the device root is TL2 or TL1 static priority is disabled.
309          */
310         if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
311             (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
312              dev->tm_flags & NIX_TM_TL1_NO_SP))
313                 strict_prio = TXSCH_TL1_DFLT_RR_PRIO;
314
315         otx2_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
316                      "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
317                      nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
318                      tm_node->id, strict_prio, rr_quantum, tm_node);
319
320         switch (hw_lvl) {
321         case NIX_TXSCH_LVL_SMQ:
322                 reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
323                 regval[k] = (strict_prio << 24) | rr_quantum;
324                 k++;
325
326                 break;
327         case NIX_TXSCH_LVL_TL4:
328                 reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
329                 regval[k] = (strict_prio << 24) | rr_quantum;
330                 k++;
331
332                 break;
333         case NIX_TXSCH_LVL_TL3:
334                 reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
335                 regval[k] = (strict_prio << 24) | rr_quantum;
336                 k++;
337
338                 break;
339         case NIX_TXSCH_LVL_TL2:
340                 reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
341                 regval[k] = (strict_prio << 24) | rr_quantum;
342                 k++;
343
344                 break;
345         case NIX_TXSCH_LVL_TL1:
346                 reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
347                 regval[k] = rr_quantum;
348                 k++;
349
350                 break;
351         }
352
353         return k;
354 }
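
/*
 * Note on the SCHEDULE values built above: the DWRR quantum derived from the
 * rte_tm weight by NIX_TM_WEIGHT_TO_RR_QUANTUM() sits in the low bits at
 * every level, and for the levels below TL1 the strict priority is placed at
 * bit 24; TL1 itself takes only the quantum.
 */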
355
356 static uint8_t
357 prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
358                       struct otx2_nix_tm_shaper_profile *profile,
359                       volatile uint64_t *reg, volatile uint64_t *regval)
360 {
361         struct shaper_params cir, pir;
362         uint32_t schq = tm_node->hw_id;
363         uint64_t adjust = 0;
364         uint8_t k = 0;
365
366         memset(&cir, 0, sizeof(cir));
367         memset(&pir, 0, sizeof(pir));
368         shaper_config_to_nix(profile, &cir, &pir);
369
370         /* Packet length adjust */
371         if (tm_node->pkt_mode)
372                 adjust = 1;
373         else if (profile)
374                 adjust = profile->params.pkt_length_adjust & 0x1FF;
375
376         otx2_tm_dbg("Shaper config node %s(%u) lvl %u id %u, pir %" PRIu64
377                     "(%" PRIu64 "B), cir %" PRIu64 "(%" PRIu64 "B)"
378                     " adjust 0x%" PRIx64 " (pktmode %u) (%p)",
379                     nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
380                     tm_node->id, pir.rate, pir.burst, cir.rate, cir.burst,
381                     adjust, tm_node->pkt_mode, tm_node);
382
383         switch (tm_node->hw_lvl) {
384         case NIX_TXSCH_LVL_SMQ:
385                 /* Configure PIR, CIR */
386                 reg[k] = NIX_AF_MDQX_PIR(schq);
387                 regval[k] = (pir.rate && pir.burst) ?
388                                 (shaper2regval(&pir) | 1) : 0;
389                 k++;
390
391                 reg[k] = NIX_AF_MDQX_CIR(schq);
392                 regval[k] = (cir.rate && cir.burst) ?
393                                 (shaper2regval(&cir) | 1) : 0;
394                 k++;
395
396                 /* Configure RED ALG */
397                 reg[k] = NIX_AF_MDQX_SHAPE(schq);
398                 regval[k] = (adjust |
399                              (uint64_t)tm_node->red_algo << 9 |
400                              (uint64_t)tm_node->pkt_mode << 24);
401                 k++;
402                 break;
403         case NIX_TXSCH_LVL_TL4:
404                 /* Configure PIR, CIR */
405                 reg[k] = NIX_AF_TL4X_PIR(schq);
406                 regval[k] = (pir.rate && pir.burst) ?
407                                 (shaper2regval(&pir) | 1) : 0;
408                 k++;
409
410                 reg[k] = NIX_AF_TL4X_CIR(schq);
411                 regval[k] = (cir.rate && cir.burst) ?
412                                 (shaper2regval(&cir) | 1) : 0;
413                 k++;
414
415                 /* Configure RED algo */
416                 reg[k] = NIX_AF_TL4X_SHAPE(schq);
417                 regval[k] = (adjust |
418                              (uint64_t)tm_node->red_algo << 9 |
419                              (uint64_t)tm_node->pkt_mode << 24);
420                 k++;
421                 break;
422         case NIX_TXSCH_LVL_TL3:
423                 /* Configure PIR, CIR */
424                 reg[k] = NIX_AF_TL3X_PIR(schq);
425                 regval[k] = (pir.rate && pir.burst) ?
426                                 (shaper2regval(&pir) | 1) : 0;
427                 k++;
428
429                 reg[k] = NIX_AF_TL3X_CIR(schq);
430                 regval[k] = (cir.rate && cir.burst) ?
431                                 (shaper2regval(&cir) | 1) : 0;
432                 k++;
433
434                 /* Configure RED algo */
435                 reg[k] = NIX_AF_TL3X_SHAPE(schq);
436                 regval[k] = (adjust |
437                              (uint64_t)tm_node->red_algo << 9 |
438                              (uint64_t)tm_node->pkt_mode << 24);
439                 k++;
440
441                 break;
442         case NIX_TXSCH_LVL_TL2:
443                 /* Configure PIR, CIR */
444                 reg[k] = NIX_AF_TL2X_PIR(schq);
445                 regval[k] = (pir.rate && pir.burst) ?
446                                 (shaper2regval(&pir) | 1) : 0;
447                 k++;
448
449                 reg[k] = NIX_AF_TL2X_CIR(schq);
450                 regval[k] = (cir.rate && cir.burst) ?
451                                 (shaper2regval(&cir) | 1) : 0;
452                 k++;
453
454                 /* Configure RED algo */
455                 reg[k] = NIX_AF_TL2X_SHAPE(schq);
456                 regval[k] = (adjust |
457                              (uint64_t)tm_node->red_algo << 9 |
458                              (uint64_t)tm_node->pkt_mode << 24);
459                 k++;
460
461                 break;
462         case NIX_TXSCH_LVL_TL1:
463                 /* Configure CIR */
464                 reg[k] = NIX_AF_TL1X_CIR(schq);
465                 regval[k] = (cir.rate && cir.burst) ?
466                                 (shaper2regval(&cir) | 1) : 0;
467                 k++;
468
469                 /* Configure length disable and adjust */
470                 reg[k] = NIX_AF_TL1X_SHAPE(schq);
471                 regval[k] = (adjust |
472                              (uint64_t)tm_node->pkt_mode << 24);
473                 k++;
474                 break;
475         }
476
477         return k;
478 }
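
/*
 * Summary of the layout used above: every level takes PIR/CIR values from
 * shaper2regval() with bit 0 as the enable, programmed only when both rate
 * and burst are non-zero (0 otherwise), while the *_SHAPE value carries the
 * length adjust in its low bits, the RED algorithm at bit 9 and the
 * packet-mode flag at bit 24; TL1 has no PIR and no RED algorithm field
 * here.
 */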
479
480 static uint8_t
481 prepare_tm_sw_xoff(struct otx2_nix_tm_node *tm_node, bool enable,
482                    volatile uint64_t *reg, volatile uint64_t *regval)
483 {
484         uint32_t hw_lvl = tm_node->hw_lvl;
485         uint32_t schq = tm_node->hw_id;
486         uint8_t k = 0;
487
488         otx2_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)",
489                     nix_hwlvl2str(hw_lvl), schq, tm_node->lvl,
490                     tm_node->id, enable, tm_node);
491
492         regval[k] = enable;
493
494         switch (hw_lvl) {
495         case NIX_TXSCH_LVL_MDQ:
496                 reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
497                 k++;
498                 break;
499         case NIX_TXSCH_LVL_TL4:
500                 reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
501                 k++;
502                 break;
503         case NIX_TXSCH_LVL_TL3:
504                 reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
505                 k++;
506                 break;
507         case NIX_TXSCH_LVL_TL2:
508                 reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
509                 k++;
510                 break;
511         case NIX_TXSCH_LVL_TL1:
512                 reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
513                 k++;
514                 break;
515         default:
516                 break;
517         }
518
519         return k;
520 }
521
522 static int
523 populate_tm_reg(struct otx2_eth_dev *dev,
524                 struct otx2_nix_tm_node *tm_node)
525 {
526         struct otx2_nix_tm_shaper_profile *profile;
527         uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
528         uint64_t regval[MAX_REGS_PER_MBOX_MSG];
529         uint64_t reg[MAX_REGS_PER_MBOX_MSG];
530         struct otx2_mbox *mbox = dev->mbox;
531         uint64_t parent = 0, child = 0;
532         uint32_t hw_lvl, rr_prio, schq;
533         struct nix_txschq_config *req;
534         int rc = -EFAULT;
535         uint8_t k = 0;
536
537         memset(regval_mask, 0, sizeof(regval_mask));
538         profile = nix_tm_shaper_profile_search(dev,
539                                         tm_node->params.shaper_profile_id);
540         rr_prio = tm_node->rr_prio;
541         hw_lvl = tm_node->hw_lvl;
542         schq = tm_node->hw_id;
543
544         /* Root node will not have a parent node */
545         if (hw_lvl == dev->otx2_tm_root_lvl)
546                 parent = tm_node->parent_hw_id;
547         else
548                 parent = tm_node->parent->hw_id;
549
550         /* When the root level is TL2, configure its TL1 parent with defaults */
551         if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
552             hw_lvl == dev->otx2_tm_root_lvl) {
553                 rc = populate_tm_tl1_default(dev, parent);
554                 if (rc)
555                         goto error;
556         }
557
558         if (hw_lvl != NIX_TXSCH_LVL_SMQ)
559                 child = find_prio_anchor(dev, tm_node->id);
560
561         /* Override default rr_prio when TL1
562          * Static Priority is disabled
563          */
564         if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
565             dev->tm_flags & NIX_TM_TL1_NO_SP) {
566                 rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
567                 child = 0;
568         }
569
570         otx2_tm_dbg("Topology config node %s(%u)->%s(%"PRIu64") lvl %u, id %u"
571                     " prio_anchor %"PRIu64" rr_prio %u (%p)",
572                     nix_hwlvl2str(hw_lvl), schq, nix_hwlvl2str(hw_lvl + 1),
573                     parent, tm_node->lvl, tm_node->id, child, rr_prio, tm_node);
574
575         /* Prepare Topology and Link config */
576         switch (hw_lvl) {
577         case NIX_TXSCH_LVL_SMQ:
578
579                 /* Set xoff which will be cleared later and minimum length
580                  * which will be used for zero padding if packet length is
581                  * smaller
582                  */
583                 reg[k] = NIX_AF_SMQX_CFG(schq);
584                 regval[k] = BIT_ULL(50) | NIX_MIN_HW_FRS;
585                 regval_mask[k] = ~(BIT_ULL(50) | 0x7f);
586                 k++;
587
588                 /* Parent and schedule conf */
589                 reg[k] = NIX_AF_MDQX_PARENT(schq);
590                 regval[k] = parent << 16;
591                 k++;
592
593                 break;
594         case NIX_TXSCH_LVL_TL4:
595                 /* Parent and schedule conf */
596                 reg[k] = NIX_AF_TL4X_PARENT(schq);
597                 regval[k] = parent << 16;
598                 k++;
599
600                 reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
601                 regval[k] = (child << 32) | (rr_prio << 1);
602                 k++;
603
604                 /* Configure TL4 to send to SDP channel instead of CGX/LBK */
605                 if (otx2_dev_is_sdp(dev)) {
606                         reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
607                         regval[k] = BIT_ULL(12);
608                         k++;
609                 }
610                 break;
611         case NIX_TXSCH_LVL_TL3:
612                 /* Parent and schedule conf */
613                 reg[k] = NIX_AF_TL3X_PARENT(schq);
614                 regval[k] = parent << 16;
615                 k++;
616
617                 reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
618                 regval[k] = (child << 32) | (rr_prio << 1);
619                 k++;
620
621                 /* Link configuration */
622                 if (!otx2_dev_is_sdp(dev) &&
623                     dev->link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
624                         reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
625                                                 otx2_nix_get_link(dev));
626                         regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
627                         k++;
628                 }
629
630                 break;
631         case NIX_TXSCH_LVL_TL2:
632                 /* Parent and schedule conf */
633                 reg[k] = NIX_AF_TL2X_PARENT(schq);
634                 regval[k] = parent << 16;
635                 k++;
636
637                 reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
638                 regval[k] = (child << 32) | (rr_prio << 1);
639                 k++;
640
641                 /* Link configuration */
642                 if (!otx2_dev_is_sdp(dev) &&
643                     dev->link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
644                         reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
645                                                 otx2_nix_get_link(dev));
646                         regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
647                         k++;
648                 }
649
650                 break;
651         case NIX_TXSCH_LVL_TL1:
652                 reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
653                 regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
654                 k++;
655
656                 break;
657         }
658
659         /* Prepare schedule config */
660         k += prepare_tm_sched_reg(dev, tm_node, &reg[k], &regval[k]);
661
662         /* Prepare shaping config */
663         k += prepare_tm_shaper_reg(tm_node, profile, &reg[k], &regval[k]);
664
665         if (!k)
666                 return 0;
667
668         /* Copy and send config mbox */
669         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
670         req->lvl = hw_lvl;
671         req->num_regs = k;
672
673         otx2_mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
674         otx2_mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
675         otx2_mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);
676
677         rc = otx2_mbox_process(mbox);
678         if (rc)
679                 goto error;
680
681         return 0;
682 error:
683         otx2_err("Txschq cfg request failed for node %p, rc=%d", tm_node, rc);
684         return rc;
685 }
686
687
688 static int
689 nix_tm_txsch_reg_config(struct otx2_eth_dev *dev)
690 {
691         struct otx2_nix_tm_node *tm_node;
692         uint32_t hw_lvl;
693         int rc = 0;
694
695         for (hw_lvl = 0; hw_lvl <= dev->otx2_tm_root_lvl; hw_lvl++) {
696                 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
697                         if (tm_node->hw_lvl == hw_lvl &&
698                             tm_node->hw_lvl != NIX_TXSCH_LVL_CNT) {
699                                 rc = populate_tm_reg(dev, tm_node);
700                                 if (rc)
701                                         goto exit;
702                         }
703                 }
704         }
705 exit:
706         return rc;
707 }
708
709 static struct otx2_nix_tm_node *
710 nix_tm_node_search(struct otx2_eth_dev *dev,
711                    uint32_t node_id, bool user)
712 {
713         struct otx2_nix_tm_node *tm_node;
714
715         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
716                 if (tm_node->id == node_id &&
717                     (user == !!(tm_node->flags & NIX_TM_NODE_USER)))
718                         return tm_node;
719         }
720         return NULL;
721 }
722
723 static uint32_t
724 check_rr(struct otx2_eth_dev *dev, uint32_t priority, uint32_t parent_id)
725 {
726         struct otx2_nix_tm_node *tm_node;
727         uint32_t rr_num = 0;
728
729         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
730                 if (!tm_node->parent)
731                         continue;
732
733                 if (tm_node->parent->id != parent_id)
734                         continue;
735
736                 if (tm_node->priority == priority)
737                         rr_num++;
738         }
739         return rr_num;
740 }
741
742 static int
743 nix_tm_update_parent_info(struct otx2_eth_dev *dev)
744 {
745         struct otx2_nix_tm_node *tm_node_child;
746         struct otx2_nix_tm_node *tm_node;
747         struct otx2_nix_tm_node *parent;
748         uint32_t rr_num = 0;
749         uint32_t priority;
750
751         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
752                 if (!tm_node->parent)
753                         continue;
754                 /* Count children sharing the same priority, i.e. the RR group */
755                 parent = tm_node->parent;
756                 priority = tm_node->priority;
757                 rr_num = check_rr(dev, priority, parent->id);
758
759                 /* Assume that multiple RR groups are not
760                  * configured, as per the reported capability.
761                  */
762                 if (rr_num > 1) {
763                         parent->rr_prio = priority;
764                         parent->rr_num = rr_num;
765                 }
766
767                 /* Find out static priority children that are not in RR */
768                 TAILQ_FOREACH(tm_node_child, &dev->node_list, node) {
769                         if (!tm_node_child->parent)
770                                 continue;
771                         if (parent->id != tm_node_child->parent->id)
772                                 continue;
773                         if (parent->max_prio == UINT32_MAX &&
774                             tm_node_child->priority != parent->rr_prio)
775                                 parent->max_prio = 0;
776
777                         if (parent->max_prio < tm_node_child->priority &&
778                             parent->rr_prio != tm_node_child->priority)
779                                 parent->max_prio = tm_node_child->priority;
780                 }
781         }
782
783         return 0;
784 }
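
/*
 * For example, a parent whose children use priorities {0, 1, 1, 2} ends up
 * with rr_prio = 1 (the RR group), rr_num = 2 and max_prio = 2; max_prio + 1
 * is later used as the number of contiguous (strict priority) schqs to
 * request for that parent.
 */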
785
786 static int
787 nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,
788                         uint32_t parent_node_id, uint32_t priority,
789                         uint32_t weight, uint16_t hw_lvl,
790                         uint16_t lvl, bool user,
791                         struct rte_tm_node_params *params)
792 {
793         struct otx2_nix_tm_shaper_profile *profile;
794         struct otx2_nix_tm_node *tm_node, *parent_node;
795         uint32_t profile_id;
796
797         profile_id = params->shaper_profile_id;
798         profile = nix_tm_shaper_profile_search(dev, profile_id);
799
800         parent_node = nix_tm_node_search(dev, parent_node_id, user);
801
802         tm_node = rte_zmalloc("otx2_nix_tm_node",
803                               sizeof(struct otx2_nix_tm_node), 0);
804         if (!tm_node)
805                 return -ENOMEM;
806
807         tm_node->lvl = lvl;
808         tm_node->hw_lvl = hw_lvl;
809
810         /* Maintain minimum weight */
811         if (!weight)
812                 weight = 1;
813
814         tm_node->id = node_id;
815         tm_node->priority = priority;
816         tm_node->weight = weight;
817         tm_node->rr_prio = 0xf;
818         tm_node->max_prio = UINT32_MAX;
819         tm_node->hw_id = UINT32_MAX;
820         tm_node->flags = 0;
821         if (user)
822                 tm_node->flags = NIX_TM_NODE_USER;
823
824         /* Packet mode */
825         if (!nix_tm_is_leaf(dev, lvl) &&
826             ((profile && profile->params.packet_mode) ||
827              (params->nonleaf.wfq_weight_mode &&
828               params->nonleaf.n_sp_priorities &&
829               !params->nonleaf.wfq_weight_mode[0])))
830                 tm_node->pkt_mode = 1;
831
832         rte_memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
833
834         if (profile)
835                 profile->reference_count++;
836
837         tm_node->parent = parent_node;
838         tm_node->parent_hw_id = UINT32_MAX;
839         shaper_default_red_algo(dev, tm_node, profile);
840
841         TAILQ_INSERT_TAIL(&dev->node_list, tm_node, node);
842
843         return 0;
844 }
845
846 static int
847 nix_tm_clear_shaper_profiles(struct otx2_eth_dev *dev)
848 {
849         struct otx2_nix_tm_shaper_profile *shaper_profile;
850
851         while ((shaper_profile = TAILQ_FIRST(&dev->shaper_profile_list))) {
852                 if (shaper_profile->reference_count)
853                         otx2_tm_dbg("Shaper profile %u has non zero references",
854                                     shaper_profile->shaper_profile_id);
855                 TAILQ_REMOVE(&dev->shaper_profile_list, shaper_profile, shaper);
856                 rte_free(shaper_profile);
857         }
858
859         return 0;
860 }
861
862 static int
863 nix_clear_path_xoff(struct otx2_eth_dev *dev,
864                     struct otx2_nix_tm_node *tm_node)
865 {
866         struct nix_txschq_config *req;
867         struct otx2_nix_tm_node *p;
868         int rc;
869
870         /* Manipulating SW_XOFF not supported on Ax */
871         if (otx2_dev_is_Ax(dev))
872                 return 0;
873
874         /* Enable nodes in path for flush to succeed */
875         if (!nix_tm_is_leaf(dev, tm_node->lvl))
876                 p = tm_node;
877         else
878                 p = tm_node->parent;
879         while (p) {
880                 if (!(p->flags & NIX_TM_NODE_ENABLED) &&
881                     (p->flags & NIX_TM_NODE_HWRES)) {
882                         req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
883                         req->lvl = p->hw_lvl;
884                         req->num_regs = prepare_tm_sw_xoff(p, false, req->reg,
885                                                            req->regval);
886                         rc = otx2_mbox_process(dev->mbox);
887                         if (rc)
888                                 return rc;
889
890                         p->flags |= NIX_TM_NODE_ENABLED;
891                 }
892                 p = p->parent;
893         }
894
895         return 0;
896 }
897
898 static int
899 nix_smq_xoff(struct otx2_eth_dev *dev,
900              struct otx2_nix_tm_node *tm_node,
901              bool enable)
902 {
903         struct otx2_mbox *mbox = dev->mbox;
904         struct nix_txschq_config *req;
905         uint16_t smq;
906         int rc;
907
908         smq = tm_node->hw_id;
909         otx2_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
910                     enable ? "enable" : "disable");
911
912         rc = nix_clear_path_xoff(dev, tm_node);
913         if (rc)
914                 return rc;
915
916         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
917         req->lvl = NIX_TXSCH_LVL_SMQ;
918         req->num_regs = 1;
919
920         req->reg[0] = NIX_AF_SMQX_CFG(smq);
921         req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
922         req->regval_mask[0] = enable ?
923                                 ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);
924
925         return otx2_mbox_process(mbox);
926 }
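
/*
 * In the write above, enabling XOFF sets both bit 50 (the same SW XOFF bit
 * parked on the SMQ at init time in populate_tm_reg()) and bit 49, matching
 * the "XOFF/FLUSH" wording of the debug message, while disabling clears
 * bit 50 only, via regval_mask.
 */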
927
928 int
929 otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable)
930 {
931         struct otx2_eth_txq *txq = __txq;
932         struct npa_aq_enq_req *req;
933         struct npa_aq_enq_rsp *rsp;
934         struct otx2_npa_lf *lf;
935         struct otx2_mbox *mbox;
936         uint64_t aura_handle;
937         int rc;
938
939         otx2_tm_dbg("Setting SQ %u SQB aura FC to %s", txq->sq,
940                     enable ? "enable" : "disable");
941
942         lf = otx2_npa_lf_obj_get();
943         if (!lf)
944                 return -EFAULT;
945         mbox = lf->mbox;
946         /* Set/clear sqb aura fc_ena */
947         aura_handle = txq->sqb_pool->pool_id;
948         req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
949
950         req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
951         req->ctype = NPA_AQ_CTYPE_AURA;
952         req->op = NPA_AQ_INSTOP_WRITE;
953         /* Below is not needed for aura writes but AF driver needs it */
954         /* AF will translate to associated poolctx */
955         req->aura.pool_addr = req->aura_id;
956
957         req->aura.fc_ena = enable;
958         req->aura_mask.fc_ena = 1;
959
960         rc = otx2_mbox_process(mbox);
961         if (rc)
962                 return rc;
963
964         /* Read back npa aura ctx */
965         req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
966
967         req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
968         req->ctype = NPA_AQ_CTYPE_AURA;
969         req->op = NPA_AQ_INSTOP_READ;
970
971         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
972         if (rc)
973                 return rc;
974
975         /* Init when enabled as there might be no triggers */
976         if (enable)
977                 *(volatile uint64_t *)txq->fc_mem = rsp->aura.count;
978         else
979                 *(volatile uint64_t *)txq->fc_mem = txq->nb_sqb_bufs;
980         /* Sync write barrier */
981         rte_wmb();
982
983         return 0;
984 }
985
986 static int
987 nix_txq_flush_sq_spin(struct otx2_eth_txq *txq)
988 {
989         uint16_t sqb_cnt, head_off, tail_off;
990         struct otx2_eth_dev *dev = txq->dev;
991         uint64_t wdata, val, prev;
992         uint16_t sq = txq->sq;
993         int64_t *regaddr;
994         uint64_t timeout; /* in units of 10 us */
995
996         /* Wait for enough time based on shaper min rate */
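            /* The resulting value is in units of 10 us: draining nb_desc
             * frames of NIX_MAX_HW_FRS bytes at tm_rate_min bits per second
             * takes nb_desc * NIX_MAX_HW_FRS * 8 / tm_rate_min seconds, and
             * the 1E5 factor converts seconds into 10 us ticks, one tick per
             * rte_delay_us(10) iteration of the loop below.
             */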
997         timeout = (txq->qconf.nb_desc * NIX_MAX_HW_FRS * 8 * 1E5);
998         timeout = timeout / dev->tm_rate_min;
999         if (!timeout)
1000                 timeout = 10000;
1001
1002         wdata = ((uint64_t)sq << 32);
1003         regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS);
1004         val = otx2_atomic64_add_nosync(wdata, regaddr);
1005
1006         /* Spin multiple iterations as "txq->fc_cache_pkts" can still
1007          * have space to send pkts even though fc_mem is disabled
1008          */
1009
1010         while (true) {
1011                 prev = val;
1012                 rte_delay_us(10);
1013                 val = otx2_atomic64_add_nosync(wdata, regaddr);
1014                 /* Continue on error */
1015                 if (val & BIT_ULL(63))
1016                         continue;
1017
1018                 if (prev != val)
1019                         continue;
1020
1021                 sqb_cnt = val & 0xFFFF;
1022                 head_off = (val >> 20) & 0x3F;
1023                 tail_off = (val >> 28) & 0x3F;
1024
1025                 /* SQ reached quiescent state */
1026                 if (sqb_cnt <= 1 && head_off == tail_off &&
1027                     (*txq->fc_mem == txq->nb_sqb_bufs)) {
1028                         break;
1029                 }
1030
1031                 /* Timeout */
1032                 if (!timeout)
1033                         goto exit;
1034                 timeout--;
1035         }
1036
1037         return 0;
1038 exit:
1039         otx2_nix_tm_dump(dev);
1040         return -EFAULT;
1041 }
1042
1043 /* Flush and disable tx queue and its parent SMQ */
1044 int otx2_nix_sq_flush_pre(void *_txq, bool dev_started)
1045 {
1046         struct otx2_nix_tm_node *tm_node, *sibling;
1047         struct otx2_eth_txq *txq;
1048         struct otx2_eth_dev *dev;
1049         uint16_t sq;
1050         bool user;
1051         int rc;
1052
1053         txq = _txq;
1054         dev = txq->dev;
1055         sq = txq->sq;
1056
1057         user = !!(dev->tm_flags & NIX_TM_COMMITTED);
1058
1059         /* Find the node for this SQ */
1060         tm_node = nix_tm_node_search(dev, sq, user);
1061         if (!tm_node || !(tm_node->flags & NIX_TM_NODE_ENABLED)) {
1062                 otx2_err("Invalid node/state for sq %u", sq);
1063                 return -EFAULT;
1064         }
1065
1066         /* Enable CGX RXTX to drain pkts */
1067         if (!dev_started) {
1068                 /* Though this enables both RX MCAM entries and the CGX link,
1069                  * we assume all the Rx queues were already stopped.
1070                  */
1071                 otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
1072                 rc = otx2_mbox_process(dev->mbox);
1073                 if (rc) {
1074                         otx2_err("cgx start failed, rc=%d", rc);
1075                         return rc;
1076                 }
1077         }
1078
1079         /* Disable SMQ xoff in case it was enabled earlier */
1080         rc = nix_smq_xoff(dev, tm_node->parent, false);
1081         if (rc) {
1082                 otx2_err("Failed to enable smq %u, rc=%d",
1083                          tm_node->parent->hw_id, rc);
1084                 return rc;
1085         }
1086
1087         /* As per the HRM, to disable an SQ, all other SQs
1088          * that feed the same SMQ must be paused before the SMQ flush.
1089          */
1090         TAILQ_FOREACH(sibling, &dev->node_list, node) {
1091                 if (sibling->parent != tm_node->parent)
1092                         continue;
1093                 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
1094                         continue;
1095
1096                 sq = sibling->id;
1097                 txq = dev->eth_dev->data->tx_queues[sq];
1098                 if (!txq)
1099                         continue;
1100
1101                 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
1102                 if (rc) {
1103                         otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
1104                         goto cleanup;
1105                 }
1106
1107                 /* Wait for sq entries to be flushed */
1108                 rc = nix_txq_flush_sq_spin(txq);
1109                 if (rc) {
1110                         otx2_err("Failed to drain sq %u, rc=%d", txq->sq, rc);
1111                         return rc;
1112                 }
1113         }
1114
1115         tm_node->flags &= ~NIX_TM_NODE_ENABLED;
1116
1117         /* Disable and flush */
1118         rc = nix_smq_xoff(dev, tm_node->parent, true);
1119         if (rc) {
1120                 otx2_err("Failed to disable smq %u, rc=%d",
1121                          tm_node->parent->hw_id, rc);
1122                 goto cleanup;
1123         }
1124 cleanup:
1125         /* Restore cgx state */
1126         if (!dev_started) {
1127                 otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
1128                 rc |= otx2_mbox_process(dev->mbox);
1129         }
1130
1131         return rc;
1132 }
1133
1134 int otx2_nix_sq_flush_post(void *_txq)
1135 {
1136         struct otx2_nix_tm_node *tm_node, *sibling;
1137         struct otx2_eth_txq *txq = _txq;
1138         struct otx2_eth_txq *s_txq;
1139         struct otx2_eth_dev *dev;
1140         bool once = false;
1141         uint16_t sq, s_sq;
1142         bool user;
1143         int rc;
1144
1145         dev = txq->dev;
1146         sq = txq->sq;
1147         user = !!(dev->tm_flags & NIX_TM_COMMITTED);
1148
1149         /* Find the node for this SQ */
1150         tm_node = nix_tm_node_search(dev, sq, user);
1151         if (!tm_node) {
1152                 otx2_err("Invalid node for sq %u", sq);
1153                 return -EFAULT;
1154         }
1155
1156         /* Enable all the siblings back */
1157         TAILQ_FOREACH(sibling, &dev->node_list, node) {
1158                 if (sibling->parent != tm_node->parent)
1159                         continue;
1160
1161                 if (sibling->id == sq)
1162                         continue;
1163
1164                 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
1165                         continue;
1166
1167                 s_sq = sibling->id;
1168                 s_txq = dev->eth_dev->data->tx_queues[s_sq];
1169                 if (!s_txq)
1170                         continue;
1171
1172                 if (!once) {
1173                         /* Enable back if any SQ is still present */
1174                         rc = nix_smq_xoff(dev, tm_node->parent, false);
1175                         if (rc) {
1176                                 otx2_err("Failed to enable smq %u, rc=%d",
1177                                          tm_node->parent->hw_id, rc);
1178                                 return rc;
1179                         }
1180                         once = true;
1181                 }
1182
1183                 rc = otx2_nix_sq_sqb_aura_fc(s_txq, true);
1184                 if (rc) {
1185                         otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
1186                         return rc;
1187                 }
1188         }
1189
1190         return 0;
1191 }
1192
1193 static int
1194 nix_sq_sched_data(struct otx2_eth_dev *dev,
1195                   struct otx2_nix_tm_node *tm_node,
1196                   bool rr_quantum_only)
1197 {
1198         struct rte_eth_dev *eth_dev = dev->eth_dev;
1199         struct otx2_mbox *mbox = dev->mbox;
1200         uint16_t sq = tm_node->id, smq;
1201         struct nix_aq_enq_req *req;
1202         uint64_t rr_quantum;
1203         int rc;
1204
1205         smq = tm_node->parent->hw_id;
1206         rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
1207
1208         if (rr_quantum_only)
1209                 otx2_tm_dbg("Update sq(%u) rr_quantum 0x%"PRIx64, sq, rr_quantum);
1210         else
1211                 otx2_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%"PRIx64,
1212                             sq, smq, rr_quantum);
1213
1214         if (sq >= eth_dev->data->nb_tx_queues)
1215                 return -EFAULT;
1216
1217         req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
1218         req->qidx = sq;
1219         req->ctype = NIX_AQ_CTYPE_SQ;
1220         req->op = NIX_AQ_INSTOP_WRITE;
1221
1222         /* smq update only when needed */
1223         if (!rr_quantum_only) {
1224                 req->sq.smq = smq;
1225                 req->sq_mask.smq = ~req->sq_mask.smq;
1226         }
1227         req->sq.smq_rr_quantum = rr_quantum;
1228         req->sq_mask.smq_rr_quantum = ~req->sq_mask.smq_rr_quantum;
1229
1230         rc = otx2_mbox_process(mbox);
1231         if (rc)
1232                 otx2_err("Failed to set smq, rc=%d", rc);
1233         return rc;
1234 }
1235
1236 int otx2_nix_sq_enable(void *_txq)
1237 {
1238         struct otx2_eth_txq *txq = _txq;
1239         int rc;
1240
1241         /* Enable sqb_aura fc */
1242         rc = otx2_nix_sq_sqb_aura_fc(txq, true);
1243         if (rc) {
1244                 otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
1245                 return rc;
1246         }
1247
1248         return 0;
1249 }
1250
1251 static int
1252 nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask,
1253                       uint32_t flags, bool hw_only)
1254 {
1255         struct otx2_nix_tm_shaper_profile *profile;
1256         struct otx2_nix_tm_node *tm_node, *next_node;
1257         struct otx2_mbox *mbox = dev->mbox;
1258         struct nix_txsch_free_req *req;
1259         uint32_t profile_id;
1260         int rc = 0;
1261
1262         next_node = TAILQ_FIRST(&dev->node_list);
1263         while (next_node) {
1264                 tm_node = next_node;
1265                 next_node = TAILQ_NEXT(tm_node, node);
1266
1267                 /* Check for only requested nodes */
1268                 if ((tm_node->flags & flags_mask) != flags)
1269                         continue;
1270
1271                 if (!nix_tm_is_leaf(dev, tm_node->lvl) &&
1272                     tm_node->hw_lvl != NIX_TXSCH_LVL_TL1 &&
1273                     tm_node->flags & NIX_TM_NODE_HWRES) {
1274                         /* Free specific HW resource */
1275                         otx2_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
1276                                     nix_hwlvl2str(tm_node->hw_lvl),
1277                                     tm_node->hw_id, tm_node->lvl,
1278                                     tm_node->id, tm_node);
1279
1280                         rc = nix_clear_path_xoff(dev, tm_node);
1281                         if (rc)
1282                                 return rc;
1283
1284                         req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
1285                         req->flags = 0;
1286                         req->schq_lvl = tm_node->hw_lvl;
1287                         req->schq = tm_node->hw_id;
1288                         rc = otx2_mbox_process(mbox);
1289                         if (rc)
1290                                 return rc;
1291                         tm_node->flags &= ~NIX_TM_NODE_HWRES;
1292                 }
1293
1294                 /* Leave software elements if needed */
1295                 if (hw_only)
1296                         continue;
1297
1298                 otx2_tm_dbg("Free node lvl %u id %u (%p)",
1299                             tm_node->lvl, tm_node->id, tm_node);
1300
1301                 profile_id = tm_node->params.shaper_profile_id;
1302                 profile = nix_tm_shaper_profile_search(dev, profile_id);
1303                 if (profile)
1304                         profile->reference_count--;
1305
1306                 TAILQ_REMOVE(&dev->node_list, tm_node, node);
1307                 rte_free(tm_node);
1308         }
1309
1310         if (!flags_mask) {
1311                 /* Free all hw resources */
1312                 req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
1313                 req->flags = TXSCHQ_FREE_ALL;
1314
1315                 return otx2_mbox_process(mbox);
1316         }
1317
1318         return rc;
1319 }
1320
1321 static uint8_t
1322 nix_tm_copy_rsp_to_dev(struct otx2_eth_dev *dev,
1323                        struct nix_txsch_alloc_rsp *rsp)
1324 {
1325         uint16_t schq;
1326         uint8_t lvl;
1327
1328         for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1329                 for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) {
1330                         dev->txschq_list[lvl][schq] = rsp->schq_list[lvl][schq];
1331                         dev->txschq_contig_list[lvl][schq] =
1332                                 rsp->schq_contig_list[lvl][schq];
1333                 }
1334
1335                 dev->txschq[lvl] = rsp->schq[lvl];
1336                 dev->txschq_contig[lvl] = rsp->schq_contig[lvl];
1337         }
1338         return 0;
1339 }
1340
1341 static int
1342 nix_tm_assign_id_to_node(struct otx2_eth_dev *dev,
1343                          struct otx2_nix_tm_node *child,
1344                          struct otx2_nix_tm_node *parent)
1345 {
1346         uint32_t hw_id, schq_con_index, prio_offset;
1347         uint32_t l_id, schq_index;
1348
1349         otx2_tm_dbg("Assign hw id for child node %s lvl %u id %u (%p)",
1350                     nix_hwlvl2str(child->hw_lvl), child->lvl, child->id, child);
1351
1352         child->flags |= NIX_TM_NODE_HWRES;
1353
1354         /* Process root nodes */
1355         if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
1356             child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
1357                 int idx = 0;
1358                 uint32_t tschq_con_index;
1359
1360                 l_id = child->hw_lvl;
1361                 tschq_con_index = dev->txschq_contig_index[l_id];
1362                 hw_id = dev->txschq_contig_list[l_id][tschq_con_index];
1363                 child->hw_id = hw_id;
1364                 dev->txschq_contig_index[l_id]++;
1365                 /* Update TL1 hw_id for its parent for config purpose */
1366                 idx = dev->txschq_index[NIX_TXSCH_LVL_TL1]++;
1367                 hw_id = dev->txschq_list[NIX_TXSCH_LVL_TL1][idx];
1368                 child->parent_hw_id = hw_id;
1369                 return 0;
1370         }
1371         if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL1 &&
1372             child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
1373                 uint32_t tschq_con_index;
1374
1375                 l_id = child->hw_lvl;
1376                 tschq_con_index = dev->txschq_index[l_id];
1377                 hw_id = dev->txschq_list[l_id][tschq_con_index];
1378                 child->hw_id = hw_id;
1379                 dev->txschq_index[l_id]++;
1380                 return 0;
1381         }
1382
1383         /* Process children with parents */
1384         l_id = child->hw_lvl;
1385         schq_index = dev->txschq_index[l_id];
1386         schq_con_index = dev->txschq_contig_index[l_id];
1387
1388         if (child->priority == parent->rr_prio) {
1389                 hw_id = dev->txschq_list[l_id][schq_index];
1390                 child->hw_id = hw_id;
1391                 child->parent_hw_id = parent->hw_id;
1392                 dev->txschq_index[l_id]++;
1393         } else {
1394                 prio_offset = schq_con_index + child->priority;
1395                 hw_id = dev->txschq_contig_list[l_id][prio_offset];
1396                 child->hw_id = hw_id;
1397         }
1398         return 0;
1399 }
1400
1401 static int
1402 nix_tm_assign_hw_id(struct otx2_eth_dev *dev)
1403 {
1404         struct otx2_nix_tm_node *parent, *child;
1405         uint32_t child_hw_lvl, con_index_inc, i;
1406
1407         for (i = NIX_TXSCH_LVL_TL1; i > 0; i--) {
1408                 TAILQ_FOREACH(parent, &dev->node_list, node) {
1409                         child_hw_lvl = parent->hw_lvl - 1;
1410                         if (parent->hw_lvl != i)
1411                                 continue;
1412                         TAILQ_FOREACH(child, &dev->node_list, node) {
1413                                 if (!child->parent)
1414                                         continue;
1415                                 if (child->parent->id != parent->id)
1416                                         continue;
1417                                 nix_tm_assign_id_to_node(dev, child, parent);
1418                         }
1419
1420                         con_index_inc = parent->max_prio + 1;
1421                         dev->txschq_contig_index[child_hw_lvl] += con_index_inc;
1422
1423                         /*
1424                          * Explicitly assign id to parent node if it
1425                          * doesn't have a parent
1426                          */
1427                         if (parent->hw_lvl == dev->otx2_tm_root_lvl)
1428                                 nix_tm_assign_id_to_node(dev, parent, NULL);
1429                 }
1430         }
1431         return 0;
1432 }
1433
1434 static uint8_t
1435 nix_tm_count_req_schq(struct otx2_eth_dev *dev,
1436                       struct nix_txsch_alloc_req *req, uint8_t lvl)
1437 {
1438         struct otx2_nix_tm_node *tm_node;
1439         uint8_t contig_count;
1440
1441         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1442                 if (lvl == tm_node->hw_lvl) {
1443                         req->schq[lvl - 1] += tm_node->rr_num;
1444                         if (tm_node->max_prio != UINT32_MAX) {
1445                                 contig_count = tm_node->max_prio + 1;
1446                                 req->schq_contig[lvl - 1] += contig_count;
1447                         }
1448                 }
1449                 if (lvl == dev->otx2_tm_root_lvl &&
1450                     dev->otx2_tm_root_lvl && lvl == NIX_TXSCH_LVL_TL2 &&
1451                     tm_node->hw_lvl == dev->otx2_tm_root_lvl) {
1452                         req->schq_contig[dev->otx2_tm_root_lvl]++;
1453                 }
1454         }
1455
1456         req->schq[NIX_TXSCH_LVL_TL1] = 1;
1457         req->schq_contig[NIX_TXSCH_LVL_TL1] = 0;
1458
1459         return 0;
1460 }
1461
1462 static int
1463 nix_tm_prepare_txschq_req(struct otx2_eth_dev *dev,
1464                           struct nix_txsch_alloc_req *req)
1465 {
1466         uint8_t i;
1467
1468         for (i = NIX_TXSCH_LVL_TL1; i > 0; i--)
1469                 nix_tm_count_req_schq(dev, req, i);
1470
1471         for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
1472                 dev->txschq_index[i] = 0;
1473                 dev->txschq_contig_index[i] = 0;
1474         }
1475         return 0;
1476 }
1477
1478 static int
1479 nix_tm_send_txsch_alloc_msg(struct otx2_eth_dev *dev)
1480 {
1481         struct otx2_mbox *mbox = dev->mbox;
1482         struct nix_txsch_alloc_req *req;
1483         struct nix_txsch_alloc_rsp *rsp;
1484         int rc;
1485
1486         req = otx2_mbox_alloc_msg_nix_txsch_alloc(mbox);
1487
1488         rc = nix_tm_prepare_txschq_req(dev, req);
1489         if (rc)
1490                 return rc;
1491
1492         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1493         if (rc)
1494                 return rc;
1495
1496         nix_tm_copy_rsp_to_dev(dev, rsp);
1497         dev->link_cfg_lvl = rsp->link_cfg_lvl;
1498
1499         nix_tm_assign_hw_id(dev);
1500         return 0;
1501 }
1502
1503 static int
1504 nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)
1505 {
1506         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1507         struct otx2_nix_tm_node *tm_node;
1508         struct otx2_eth_txq *txq;
1509         uint16_t sq;
1510         int rc;
1511
1512         nix_tm_update_parent_info(dev);
1513
1514         rc = nix_tm_send_txsch_alloc_msg(dev);
1515         if (rc) {
1516                 otx2_err("TM failed to alloc tm resources=%d", rc);
1517                 return rc;
1518         }
1519
1520         rc = nix_tm_txsch_reg_config(dev);
1521         if (rc) {
1522                 otx2_err("TM failed to configure sched registers=%d", rc);
1523                 return rc;
1524         }
1525
1526         /* Trigger MTU recalculate as SMQ needs MTU conf */
1527         if (eth_dev->data->dev_started && eth_dev->data->nb_rx_queues) {
1528                 rc = otx2_nix_recalc_mtu(eth_dev);
1529                 if (rc) {
1530                         otx2_err("TM MTU update failed, rc=%d", rc);
1531                         return rc;
1532                 }
1533         }
1534
1535         /* Mark all non-leaf nodes as enabled */
1536         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1537                 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1538                         tm_node->flags |= NIX_TM_NODE_ENABLED;
1539         }
1540
1541         if (!xmit_enable)
1542                 return 0;
1543
1544         /* Update SQ Sched Data while SQ is idle */
1545         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1546                 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1547                         continue;
1548
1549                 rc = nix_sq_sched_data(dev, tm_node, false);
1550                 if (rc) {
1551                         otx2_err("SQ %u sched update failed, rc=%d",
1552                                  tm_node->id, rc);
1553                         return rc;
1554                 }
1555         }
1556
1557         /* Finally, XON all SMQs */
1558         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1559                 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1560                         continue;
1561
1562                 rc = nix_smq_xoff(dev, tm_node, false);
1563                 if (rc) {
1564                         otx2_err("Failed to enable smq %u, rc=%d",
1565                                  tm_node->hw_id, rc);
1566                         return rc;
1567                 }
1568         }
1569
1570         /* Enable xmit as the whole topology is ready */
1571         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1572                 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1573                         continue;
1574
1575                 sq = tm_node->id;
1576                 txq = eth_dev->data->tx_queues[sq];
1577
1578                 rc = otx2_nix_sq_enable(txq);
1579                 if (rc) {
1580                         otx2_err("TM sw xon failed on SQ %u, rc=%d",
1581                                  tm_node->id, rc);
1582                         return rc;
1583                 }
1584                 tm_node->flags |= NIX_TM_NODE_ENABLED;
1585         }
1586
1587         return 0;
1588 }
1589
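/* Validate the register count of a txschq config request and push it
 * over mbox, translating any failure into an rte_tm error.
 */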
1590 static int
1591 send_tm_reqval(struct otx2_mbox *mbox,
1592                struct nix_txschq_config *req,
1593                struct rte_tm_error *error)
1594 {
1595         int rc;
1596
1597         if (!req->num_regs ||
1598             req->num_regs > MAX_REGS_PER_MBOX_MSG) {
1599                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1600                 error->message = "invalid config";
1601                 return -EIO;
1602         }
1603
1604         rc = otx2_mbox_process(mbox);
1605         if (rc) {
1606                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1607                 error->message = "unexpected fatal error";
1608         }
1609         return rc;
1610 }
1611
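/* Map an rte_tm level to the NIX HW scheduler level. With TL1 access
 * the root maps to TL1 and SCH4 to SMQ; without it the root starts at
 * TL2 and SCH3 maps to SMQ.
 */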
1612 static uint16_t
1613 nix_tm_lvl2nix(struct otx2_eth_dev *dev, uint32_t lvl)
1614 {
1615         if (nix_tm_have_tl1_access(dev)) {
1616                 switch (lvl) {
1617                 case OTX2_TM_LVL_ROOT:
1618                         return NIX_TXSCH_LVL_TL1;
1619                 case OTX2_TM_LVL_SCH1:
1620                         return NIX_TXSCH_LVL_TL2;
1621                 case OTX2_TM_LVL_SCH2:
1622                         return NIX_TXSCH_LVL_TL3;
1623                 case OTX2_TM_LVL_SCH3:
1624                         return NIX_TXSCH_LVL_TL4;
1625                 case OTX2_TM_LVL_SCH4:
1626                         return NIX_TXSCH_LVL_SMQ;
1627                 default:
1628                         return NIX_TXSCH_LVL_CNT;
1629                 }
1630         } else {
1631                 switch (lvl) {
1632                 case OTX2_TM_LVL_ROOT:
1633                         return NIX_TXSCH_LVL_TL2;
1634                 case OTX2_TM_LVL_SCH1:
1635                         return NIX_TXSCH_LVL_TL3;
1636                 case OTX2_TM_LVL_SCH2:
1637                         return NIX_TXSCH_LVL_TL4;
1638                 case OTX2_TM_LVL_SCH3:
1639                         return NIX_TXSCH_LVL_SMQ;
1640                 default:
1641                         return NIX_TXSCH_LVL_CNT;
1642                 }
1643         }
1644 }
1645
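/* Maximum static priority usable at a given HW level. MDQ never
 * supports strict priority, and neither does TL1 when the PF shares it
 * with VFs; both report 0.
 */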
1646 static uint16_t
1647 nix_max_prio(struct otx2_eth_dev *dev, uint16_t hw_lvl)
1648 {
1649         if (hw_lvl >= NIX_TXSCH_LVL_CNT)
1650                 return 0;
1651
1652         /* MDQ doesn't support SP */
1653         if (hw_lvl == NIX_TXSCH_LVL_MDQ)
1654                 return 0;
1655
1656         /* PF's TL1 with VFs enabled doesn't support SP */
1657         if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
1658             (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
1659              (dev->tm_flags & NIX_TM_TL1_NO_SP)))
1660                 return 0;
1661
1662         return TXSCH_TLX_SP_PRIO_MAX - 1;
1663 }
1664
1665
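/* Validate the priority requested for a new child of 'parent_id': it
 * must not exceed the parent level's maximum, at most one DWRR group
 * may exist among the siblings, and priorities must be contiguous.
 */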
1666 static int
1667 validate_prio(struct otx2_eth_dev *dev, uint32_t lvl,
1668               uint32_t parent_id, uint32_t priority,
1669               struct rte_tm_error *error)
1670 {
1671         uint8_t priorities[TXSCH_TLX_SP_PRIO_MAX];
1672         struct otx2_nix_tm_node *tm_node;
1673         uint32_t rr_num = 0;
1674         int i;
1675
1676         /* Validate priority against max */
1677         if (priority > nix_max_prio(dev, nix_tm_lvl2nix(dev, lvl - 1))) {
1678                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
1679                 error->message = "unsupported priority value";
1680                 return -EINVAL;
1681         }
1682
1683         if (parent_id == RTE_TM_NODE_ID_NULL)
1684                 return 0;
1685
1686         memset(priorities, 0, TXSCH_TLX_SP_PRIO_MAX);
1687         priorities[priority] = 1;
1688
1689         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1690                 if (!tm_node->parent)
1691                         continue;
1692
1693                 if (!(tm_node->flags & NIX_TM_NODE_USER))
1694                         continue;
1695
1696                 if (tm_node->parent->id != parent_id)
1697                         continue;
1698
1699                 priorities[tm_node->priority]++;
1700         }
1701
1702         for (i = 0; i < TXSCH_TLX_SP_PRIO_MAX; i++)
1703                 if (priorities[i] > 1)
1704                         rr_num++;
1705
1706         /* At most one RR group per parent */
1707         if (rr_num > 1) {
1708                 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
1709                 error->message = "multiple DWRR node priority";
1710                 return -EINVAL;
1711         }
1712
1713         /* Check for previous priority to avoid holes in priorities */
1714         if (priority && !priorities[priority - 1]) {
1715                 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
1716                 error->message = "priority not in order";
1717                 return -EINVAL;
1718         }
1719
1720         return 0;
1721 }
1722
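/* Read back a single TX scheduler register of the given HW level using
 * a one-register txschq config mbox request.
 */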
1723 static int
1724 read_tm_reg(struct otx2_mbox *mbox, uint64_t reg,
1725             uint64_t *regval, uint32_t hw_lvl)
1726 {
1727         volatile struct nix_txschq_config *req;
1728         struct nix_txschq_config *rsp;
1729         int rc;
1730
1731         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
1732         req->read = 1;
1733         req->lvl = hw_lvl;
1734         req->reg[0] = reg;
1735         req->num_regs = 1;
1736
1737         rc = otx2_mbox_process_msg(mbox, (void **)&rsp);
1738         if (rc)
1739                 return rc;
1740         *regval = rsp->regval[0];
1741         return 0;
1742 }
1743
1744 /* Search for min rate in topology */
1745 static void
1746 nix_tm_shaper_profile_update_min(struct otx2_eth_dev *dev)
1747 {
1748         struct otx2_nix_tm_shaper_profile *profile;
1749         uint64_t rate_min = 1E9; /* 1 Gbps */
1750
1751         TAILQ_FOREACH(profile, &dev->shaper_profile_list, shaper) {
1752                 if (profile->params.peak.rate &&
1753                     profile->params.peak.rate < rate_min)
1754                         rate_min = profile->params.peak.rate;
1755
1756                 if (profile->params.committed.rate &&
1757                     profile->params.committed.rate < rate_min)
1758                         rate_min = profile->params.committed.rate;
1759         }
1760
1761         dev->tm_rate_min = rate_min;
1762 }
1763
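/* Stop transmission gracefully: XON the SMQs so queued packets drain,
 * disable SQB aura flow control, spin until every SQ is empty, then
 * XOFF/flush the SMQs and verify each SQ really drained.
 */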
1764 static int
1765 nix_xmit_disable(struct rte_eth_dev *eth_dev)
1766 {
1767         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1768         uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
1769         uint16_t sqb_cnt, head_off, tail_off;
1770         struct otx2_nix_tm_node *tm_node;
1771         struct otx2_eth_txq *txq;
1772         uint64_t wdata, val;
1773         int i, rc;
1774
1775         otx2_tm_dbg("Disabling xmit on %s", eth_dev->data->name);
1776
1777         /* Enable CGX RXTX to drain pkts */
1778         if (!eth_dev->data->dev_started) {
1779                 otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
1780                 rc = otx2_mbox_process(dev->mbox);
1781                 if (rc)
1782                         return rc;
1783         }
1784
1785         /* XON all SMQs */
1786         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1787                 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1788                         continue;
1789                 if (!(tm_node->flags & NIX_TM_NODE_HWRES))
1790                         continue;
1791
1792                 rc = nix_smq_xoff(dev, tm_node, false);
1793                 if (rc) {
1794                         otx2_err("Failed to enable smq %u, rc=%d",
1795                                  tm_node->hw_id, rc);
1796                         goto cleanup;
1797                 }
1798         }
1799
1800         /* Flush all tx queues */
1801         for (i = 0; i < sq_cnt; i++) {
1802                 txq = eth_dev->data->tx_queues[i];
1803
1804                 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
1805                 if (rc) {
1806                         otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
1807                         goto cleanup;
1808                 }
1809
1810                 /* Wait for sq entries to be flushed */
1811                 rc = nix_txq_flush_sq_spin(txq);
1812                 if (rc) {
1813                         otx2_err("Failed to drain sq, rc=%d", rc);
1814                         goto cleanup;
1815                 }
1816         }
1817
1818         /* XOFF & flush all SMQs. The HRM mandates that all
1819          * SQs be empty before an SMQ flush is issued.
1820          */
1821         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1822                 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1823                         continue;
1824                 if (!(tm_node->flags & NIX_TM_NODE_HWRES))
1825                         continue;
1826
1827                 rc = nix_smq_xoff(dev, tm_node, true);
1828                 if (rc) {
1829                         otx2_err("Failed to disable smq %u, rc=%d",
1830                                  tm_node->hw_id, rc);
1831                         goto cleanup;
1832                 }
1833         }
1834
1835         /* Verify sanity of all tx queues */
1836         for (i = 0; i < sq_cnt; i++) {
1837                 txq = eth_dev->data->tx_queues[i];
1838
1839                 wdata = ((uint64_t)txq->sq << 32);
1840                 val = otx2_atomic64_add_nosync(wdata,
1841                                (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS));
1842
1843                 sqb_cnt = val & 0xFFFF;
1844                 head_off = (val >> 20) & 0x3F;
1845                 tail_off = (val >> 28) & 0x3F;
1846
1847                 if (sqb_cnt > 1 || head_off != tail_off ||
1848                     (*txq->fc_mem != txq->nb_sqb_bufs))
1849                         otx2_err("Failed to gracefully flush sq %u", txq->sq);
1850         }
1851
1852 cleanup:
1853         /* restore cgx state */
1854         if (!eth_dev->data->dev_started) {
1855                 otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
1856                 rc |= otx2_mbox_process(dev->mbox);
1857         }
1858
1859         return rc;
1860 }
1861
1862 static int
1863 otx2_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
1864                           int *is_leaf, struct rte_tm_error *error)
1865 {
1866         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1867         struct otx2_nix_tm_node *tm_node;
1868
1869         if (is_leaf == NULL) {
1870                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1871                 return -EINVAL;
1872         }
1873
1874         tm_node = nix_tm_node_search(dev, node_id, true);
1875         if (node_id == RTE_TM_NODE_ID_NULL || !tm_node) {
1876                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
1877                 return -EINVAL;
1878         }
1879         if (nix_tm_is_leaf(dev, tm_node->lvl))
1880                 *is_leaf = true;
1881         else
1882                 *is_leaf = false;
1883         return 0;
1884 }
1885
1886 static int
1887 otx2_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
1888                      struct rte_tm_capabilities *cap,
1889                      struct rte_tm_error *error)
1890 {
1891         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1892         struct otx2_mbox *mbox = dev->mbox;
1893         int rc, max_nr_nodes = 0, i;
1894         struct free_rsrcs_rsp *rsp;
1895
1896         memset(cap, 0, sizeof(*cap));
1897
1898         otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
1899         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1900         if (rc) {
1901                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1902                 error->message = "unexpected fatal error";
1903                 return rc;
1904         }
1905
1906         for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
1907                 max_nr_nodes += rsp->schq[i];
1908
1909         cap->n_nodes_max = max_nr_nodes + dev->tm_leaf_cnt;
1910         /* TL1 level is reserved for PF */
1911         cap->n_levels_max = nix_tm_have_tl1_access(dev) ?
1912                                 OTX2_TM_LVL_MAX : OTX2_TM_LVL_MAX - 1;
1913         cap->non_leaf_nodes_identical = 1;
1914         cap->leaf_nodes_identical = 1;
1915
1916         /* Shaper Capabilities */
1917         cap->shaper_private_n_max = max_nr_nodes;
1918         cap->shaper_n_max = max_nr_nodes;
1919         cap->shaper_private_dual_rate_n_max = max_nr_nodes;
1920         cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1921         cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1922         cap->shaper_private_packet_mode_supported = 1;
1923         cap->shaper_private_byte_mode_supported = 1;
1924         cap->shaper_pkt_length_adjust_min = NIX_LENGTH_ADJUST_MIN;
1925         cap->shaper_pkt_length_adjust_max = NIX_LENGTH_ADJUST_MAX;
1926
1927         /* Schedule Capabilities */
1928         cap->sched_n_children_max = rsp->schq[NIX_TXSCH_LVL_MDQ];
1929         cap->sched_sp_n_priorities_max = TXSCH_TLX_SP_PRIO_MAX;
1930         cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
1931         cap->sched_wfq_n_groups_max = 1;
1932         cap->sched_wfq_weight_max = MAX_SCHED_WEIGHT;
1933         cap->sched_wfq_packet_mode_supported = 1;
1934         cap->sched_wfq_byte_mode_supported = 1;
1935
1936         cap->dynamic_update_mask =
1937                 RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
1938                 RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
1939         cap->stats_mask =
1940                 RTE_TM_STATS_N_PKTS |
1941                 RTE_TM_STATS_N_BYTES |
1942                 RTE_TM_STATS_N_PKTS_RED_DROPPED |
1943                 RTE_TM_STATS_N_BYTES_RED_DROPPED;
1944
1945         for (i = 0; i < RTE_COLORS; i++) {
1946                 cap->mark_vlan_dei_supported[i] = false;
1947                 cap->mark_ip_ecn_tcp_supported[i] = false;
1948                 cap->mark_ip_dscp_supported[i] = false;
1949         }
1950
1951         return 0;
1952 }
1953
1954 static int
1955 otx2_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
1956                                    struct rte_tm_level_capabilities *cap,
1957                                    struct rte_tm_error *error)
1958 {
1959         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1960         struct otx2_mbox *mbox = dev->mbox;
1961         struct free_rsrcs_rsp *rsp;
1962         uint16_t hw_lvl;
1963         int rc;
1964
1965         memset(cap, 0, sizeof(*cap));
1966
1967         otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
1968         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1969         if (rc) {
1970                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1971                 error->message = "unexpected fatal error";
1972                 return rc;
1973         }
1974
1975         hw_lvl = nix_tm_lvl2nix(dev, lvl);
1976
1977         if (nix_tm_is_leaf(dev, lvl)) {
1978                 /* Leaf */
1979                 cap->n_nodes_max = dev->tm_leaf_cnt;
1980                 cap->n_nodes_leaf_max = dev->tm_leaf_cnt;
1981                 cap->leaf_nodes_identical = 1;
1982                 cap->leaf.stats_mask =
1983                         RTE_TM_STATS_N_PKTS |
1984                         RTE_TM_STATS_N_BYTES;
1985
1986         } else if (lvl == OTX2_TM_LVL_ROOT) {
1987                 /* Root node, aka TL2(vf)/TL1(pf) */
1988                 cap->n_nodes_max = 1;
1989                 cap->n_nodes_nonleaf_max = 1;
1990                 cap->non_leaf_nodes_identical = 1;
1991
1992                 cap->nonleaf.shaper_private_supported = true;
1993                 cap->nonleaf.shaper_private_dual_rate_supported =
1994                         nix_tm_have_tl1_access(dev) ? false : true;
1995                 cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1996                 cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1997                 cap->nonleaf.shaper_private_packet_mode_supported = 1;
1998                 cap->nonleaf.shaper_private_byte_mode_supported = 1;
1999
2000                 cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
2001                 cap->nonleaf.sched_sp_n_priorities_max =
2002                                         nix_max_prio(dev, hw_lvl) + 1;
2003                 cap->nonleaf.sched_wfq_n_groups_max = 1;
2004                 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
2005                 cap->nonleaf.sched_wfq_packet_mode_supported = 1;
2006                 cap->nonleaf.sched_wfq_byte_mode_supported = 1;
2007
2008                 if (nix_tm_have_tl1_access(dev))
2009                         cap->nonleaf.stats_mask =
2010                                 RTE_TM_STATS_N_PKTS_RED_DROPPED |
2011                                 RTE_TM_STATS_N_BYTES_RED_DROPPED;
2012         } else if ((lvl < OTX2_TM_LVL_MAX) &&
2013                    (hw_lvl < NIX_TXSCH_LVL_CNT)) {
2014                 /* TL2, TL3, TL4, MDQ */
2015                 cap->n_nodes_max = rsp->schq[hw_lvl];
2016                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
2017                 cap->non_leaf_nodes_identical = 1;
2018
2019                 cap->nonleaf.shaper_private_supported = true;
2020                 cap->nonleaf.shaper_private_dual_rate_supported = true;
2021                 cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
2022                 cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
2023                 cap->nonleaf.shaper_private_packet_mode_supported = 1;
2024                 cap->nonleaf.shaper_private_byte_mode_supported = 1;
2025
2026                 /* MDQ doesn't support Strict Priority */
2027                 if (hw_lvl == NIX_TXSCH_LVL_MDQ)
2028                         cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
2029                 else
2030                         cap->nonleaf.sched_n_children_max =
2031                                 rsp->schq[hw_lvl - 1];
2032                 cap->nonleaf.sched_sp_n_priorities_max =
2033                         nix_max_prio(dev, hw_lvl) + 1;
2034                 cap->nonleaf.sched_wfq_n_groups_max = 1;
2035                 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
2036                 cap->nonleaf.sched_wfq_packet_mode_supported = 1;
2037                 cap->nonleaf.sched_wfq_byte_mode_supported = 1;
2038         } else {
2039                 /* unsupported level */
2040                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2041                 return -EINVAL;
2042         }
2043         return 0;
2044 }
2045
2046 static int
2047 otx2_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
2048                           struct rte_tm_node_capabilities *cap,
2049                           struct rte_tm_error *error)
2050 {
2051         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2052         struct otx2_mbox *mbox = dev->mbox;
2053         struct otx2_nix_tm_node *tm_node;
2054         struct free_rsrcs_rsp *rsp;
2055         int rc, hw_lvl, lvl;
2056
2057         memset(cap, 0, sizeof(*cap));
2058
2059         tm_node = nix_tm_node_search(dev, node_id, true);
2060         if (!tm_node) {
2061                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2062                 error->message = "no such node";
2063                 return -EINVAL;
2064         }
2065
2066         hw_lvl = tm_node->hw_lvl;
2067         lvl = tm_node->lvl;
2068
2069         /* Leaf node */
2070         if (nix_tm_is_leaf(dev, lvl)) {
2071                 cap->stats_mask = RTE_TM_STATS_N_PKTS |
2072                                         RTE_TM_STATS_N_BYTES;
2073                 return 0;
2074         }
2075
2076         otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
2077         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
2078         if (rc) {
2079                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2080                 error->message = "unexpected fatal error";
2081                 return rc;
2082         }
2083
2084         /* Non-leaf shaper */
2085         cap->shaper_private_supported = true;
2086         cap->shaper_private_dual_rate_supported =
2087                 (hw_lvl == NIX_TXSCH_LVL_TL1) ? false : true;
2088         cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
2089         cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
2090         cap->shaper_private_packet_mode_supported = 1;
2091         cap->shaper_private_byte_mode_supported = 1;
2092
2093         /* Non-leaf scheduler */
2094         if (hw_lvl == NIX_TXSCH_LVL_MDQ)
2095                 cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
2096         else
2097                 cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
2098
2099         cap->nonleaf.sched_sp_n_priorities_max = nix_max_prio(dev, hw_lvl) + 1;
2100         cap->nonleaf.sched_wfq_n_children_per_group_max =
2101                 cap->nonleaf.sched_n_children_max;
2102         cap->nonleaf.sched_wfq_n_groups_max = 1;
2103         cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
2104         cap->nonleaf.sched_wfq_packet_mode_supported = 1;
2105         cap->nonleaf.sched_wfq_byte_mode_supported = 1;
2106
2107         if (hw_lvl == NIX_TXSCH_LVL_TL1)
2108                 cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
2109                         RTE_TM_STATS_N_BYTES_RED_DROPPED;
2110         return 0;
2111 }
2112
2113 static int
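/* rte_tm shaper_profile_add callback: validate committed/peak rate,
 * burst size and packet length adjust, store the profile with rates
 * converted to bits per second, and fold a committed-only configuration
 * into the peak (PIR) shaper.
 */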
2114 otx2_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev,
2115                                uint32_t profile_id,
2116                                struct rte_tm_shaper_params *params,
2117                                struct rte_tm_error *error)
2118 {
2119         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2120         struct otx2_nix_tm_shaper_profile *profile;
2121
2122         profile = nix_tm_shaper_profile_search(dev, profile_id);
2123         if (profile) {
2124                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2125                 error->message = "shaper profile ID already exists";
2126                 return -EINVAL;
2127         }
2128
2129         /* Committed rate and burst size can be enabled/disabled */
2130         if (params->committed.size || params->committed.rate) {
2131                 if (params->committed.size < MIN_SHAPER_BURST ||
2132                     params->committed.size > MAX_SHAPER_BURST) {
2133                         error->type =
2134                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
2135                         return -EINVAL;
2136                 } else if (!shaper_rate_to_nix(params->committed.rate * 8,
2137                                                NULL, NULL, NULL)) {
2138                         error->type =
2139                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
2140                         error->message = "shaper committed rate invalid";
2141                         return -EINVAL;
2142                 }
2143         }
2144
2145         /* Peak rate and burst size can be enabled/disabled */
2146         if (params->peak.size || params->peak.rate) {
2147                 if (params->peak.size < MIN_SHAPER_BURST ||
2148                     params->peak.size > MAX_SHAPER_BURST) {
2149                         error->type =
2150                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
2151                         return -EINVAL;
2152                 } else if (!shaper_rate_to_nix(params->peak.rate * 8,
2153                                                NULL, NULL, NULL)) {
2154                         error->type =
2155                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
2156                         error->message = "shaper peak rate invalid";
2157                         return -EINVAL;
2158                 }
2159         }
2160
2161         if (params->pkt_length_adjust < NIX_LENGTH_ADJUST_MIN ||
2162             params->pkt_length_adjust > NIX_LENGTH_ADJUST_MAX) {
2163                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
2164                 error->message = "length adjust invalid";
2165                 return -EINVAL;
2166         }
2167
2168         profile = rte_zmalloc("otx2_nix_tm_shaper_profile",
2169                               sizeof(struct otx2_nix_tm_shaper_profile), 0);
2170         if (!profile)
2171                 return -ENOMEM;
2172
2173         profile->shaper_profile_id = profile_id;
2174         rte_memcpy(&profile->params, params,
2175                    sizeof(struct rte_tm_shaper_params));
2176         TAILQ_INSERT_TAIL(&dev->shaper_profile_list, profile, shaper);
2177
2178         otx2_tm_dbg("Added TM shaper profile %u, "
2179                     "pir %" PRIu64 ", pbs %" PRIu64 ", cir %" PRIu64
2180                     ", cbs %" PRIu64 ", adj %u, pkt mode %d",
2181                     profile_id,
2182                     params->peak.rate * 8,
2183                     params->peak.size,
2184                     params->committed.rate * 8,
2185                     params->committed.size,
2186                     params->pkt_length_adjust,
2187                     params->packet_mode);
2188
2189         /* Translate rates to bits per second */
2190         profile->params.peak.rate = profile->params.peak.rate * 8;
2191         profile->params.committed.rate = profile->params.committed.rate * 8;
2192         /* Always use PIR for single rate shaping */
2193         if (!params->peak.rate && params->committed.rate) {
2194                 profile->params.peak = profile->params.committed;
2195                 memset(&profile->params.committed, 0,
2196                        sizeof(profile->params.committed));
2197         }
2198
2199         /* update min rate */
2200         nix_tm_shaper_profile_update_min(dev);
2201         return 0;
2202 }
2203
2204 static int
2205 otx2_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
2206                                   uint32_t profile_id,
2207                                   struct rte_tm_error *error)
2208 {
2209         struct otx2_nix_tm_shaper_profile *profile;
2210         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2211
2212         profile = nix_tm_shaper_profile_search(dev, profile_id);
2213
2214         if (!profile) {
2215                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2216                 error->message = "shaper profile ID does not exist";
2217                 return -EINVAL;
2218         }
2219
2220         if (profile->reference_count) {
2221                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
2222                 error->message = "shaper profile in use";
2223                 return -EINVAL;
2224         }
2225
2226         otx2_tm_dbg("Removing TM shaper profile %u", profile_id);
2227         TAILQ_REMOVE(&dev->shaper_profile_list, profile, shaper);
2228         rte_free(profile);
2229
2230         /* update min rate */
2231         nix_tm_shaper_profile_update_min(dev);
2232         return 0;
2233 }
2234
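/* rte_tm node_add callback: only static setup before hierarchy commit
 * is supported. Resolve the level, validate the parent, shaper profile,
 * priority and weight, then add the node to the software list.
 */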
2235 static int
2236 otx2_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
2237                      uint32_t parent_node_id, uint32_t priority,
2238                      uint32_t weight, uint32_t lvl,
2239                      struct rte_tm_node_params *params,
2240                      struct rte_tm_error *error)
2241 {
2242         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2243         struct otx2_nix_tm_shaper_profile *profile = NULL;
2244         struct otx2_nix_tm_node *parent_node;
2245         int rc, pkt_mode, clear_on_fail = 0;
2246         uint32_t exp_next_lvl, i;
2247         uint32_t profile_id;
2248         uint16_t hw_lvl;
2249
2250         /* we don't support dynamic updates */
2251         if (dev->tm_flags & NIX_TM_COMMITTED) {
2252                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2253                 error->message = "dynamic update not supported";
2254                 return -EIO;
2255         }
2256
2257         /* Leaf nodes must all use priority 0 */
2258         if (nix_tm_is_leaf(dev, lvl) && priority != 0) {
2259                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2260                 error->message = "queue shapers must be priority 0";
2261                 return -EIO;
2262         }
2263
2264         parent_node = nix_tm_node_search(dev, parent_node_id, true);
2265
2266         /* find the right level */
2267         if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
2268                 if (parent_node_id == RTE_TM_NODE_ID_NULL) {
2269                         lvl = OTX2_TM_LVL_ROOT;
2270                 } else if (parent_node) {
2271                         lvl = parent_node->lvl + 1;
2272                 } else {
2273                         /* Neither a valid parent nor a valid level id given */
2274                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2275                         error->message = "invalid parent node id";
2276                         return -ERANGE;
2277                 }
2278         }
2279
2280         /* Translate rte_tm level ids to nix hw level ids */
2281         hw_lvl = nix_tm_lvl2nix(dev, lvl);
2282         if (hw_lvl == NIX_TXSCH_LVL_CNT &&
2283             !nix_tm_is_leaf(dev, lvl)) {
2284                 error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
2285                 error->message = "invalid level id";
2286                 return -ERANGE;
2287         }
2288
2289         if (node_id < dev->tm_leaf_cnt)
2290                 exp_next_lvl = NIX_TXSCH_LVL_SMQ;
2291         else
2292                 exp_next_lvl = hw_lvl + 1;
2293
2294         /* Non-root nodes must have a valid parent at the expected level */
2295         if (hw_lvl != dev->otx2_tm_root_lvl &&
2296             (!parent_node || parent_node->hw_lvl != exp_next_lvl)) {
2297                 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2298                 error->message = "invalid parent node id";
2299                 return -EINVAL;
2300         }
2301
2302         /* Check if a node already exists */
2303         if (nix_tm_node_search(dev, node_id, true)) {
2304                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2305                 error->message = "node already exists";
2306                 return -EINVAL;
2307         }
2308
2309         if (!nix_tm_is_leaf(dev, lvl)) {
2310                 /* Check if shaper profile exists for non leaf node */
2311                 profile_id = params->shaper_profile_id;
2312                 profile = nix_tm_shaper_profile_search(dev, profile_id);
2313                 if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE && !profile) {
2314                         error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2315                         error->message = "invalid shaper profile";
2316                         return -EINVAL;
2317                 }
2318
2319                 /* Minimum static priority count is 1 */
2320                 if (!params->nonleaf.n_sp_priorities ||
2321                     params->nonleaf.n_sp_priorities > TXSCH_TLX_SP_PRIO_MAX) {
2322                         error->type =
2323                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
2324                         error->message = "invalid sp priorities";
2325                         return -EINVAL;
2326                 }
2327
2328                 pkt_mode = 0;
2329                 /* Validate weight mode */
2330                 for (i = 0; i < params->nonleaf.n_sp_priorities &&
2331                      params->nonleaf.wfq_weight_mode; i++) {
2332                         pkt_mode = !params->nonleaf.wfq_weight_mode[i];
2333                         if (pkt_mode == !params->nonleaf.wfq_weight_mode[0])
2334                                 continue;
2335
2336                         error->type =
2337                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
2338                         error->message = "unsupported weight mode";
2339                         return -EINVAL;
2340                 }
2341
2342                 if (profile && params->nonleaf.n_sp_priorities &&
2343                     pkt_mode != profile->params.packet_mode) {
2344                         error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
2345                         error->message = "shaper wfq packet mode mismatch";
2346                         return -EINVAL;
2347                 }
2348         }
2349
2350         /* Check for a second DWRR group among siblings or holes in priorities */
2351         if (validate_prio(dev, lvl, parent_node_id, priority, error))
2352                 return -EINVAL;
2353
2354         if (weight > MAX_SCHED_WEIGHT) {
2355                 error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
2356                 error->message = "max weight exceeded";
2357                 return -EINVAL;
2358         }
2359
2360         rc = nix_tm_node_add_to_list(dev, node_id, parent_node_id,
2361                                      priority, weight, hw_lvl,
2362                                      lvl, true, params);
2363         if (rc) {
2364                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2365                 /* cleanup user added nodes */
2366                 if (clear_on_fail)
2367                         nix_tm_free_resources(dev, NIX_TM_NODE_USER,
2368                                               NIX_TM_NODE_USER, false);
2369                 error->message = "failed to add node";
2370                 return rc;
2371         }
2372         error->type = RTE_TM_ERROR_TYPE_NONE;
2373         return 0;
2374 }
2375
2376 static int
2377 otx2_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id,
2378                         struct rte_tm_error *error)
2379 {
2380         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2381         struct otx2_nix_tm_node *tm_node, *child_node;
2382         struct otx2_nix_tm_shaper_profile *profile;
2383         uint32_t profile_id;
2384
2385         /* we don't support dynamic updates yet */
2386         if (dev->tm_flags & NIX_TM_COMMITTED) {
2387                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2388                 error->message = "hierarchy exists";
2389                 return -EIO;
2390         }
2391
2392         if (node_id == RTE_TM_NODE_ID_NULL) {
2393                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2394                 error->message = "invalid node id";
2395                 return -EINVAL;
2396         }
2397
2398         tm_node = nix_tm_node_search(dev, node_id, true);
2399         if (!tm_node) {
2400                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2401                 error->message = "no such node";
2402                 return -EINVAL;
2403         }
2404
2405         /* Check for any existing children */
2406         TAILQ_FOREACH(child_node, &dev->node_list, node) {
2407                 if (child_node->parent == tm_node) {
2408                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2409                         error->message = "children exist";
2410                         return -EINVAL;
2411                 }
2412         }
2413
2414         /* Remove shaper profile reference */
2415         profile_id = tm_node->params.shaper_profile_id;
2416         profile = nix_tm_shaper_profile_search(dev, profile_id);
2417         if (profile)
                     profile->reference_count--;
2418
2419         TAILQ_REMOVE(&dev->node_list, tm_node, node);
2420         rte_free(tm_node);
2421         return 0;
2422 }
2423
2424 static int
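/* Common helper for node suspend/resume: once the hierarchy is
 * committed, toggle the node's SW_XOFF state via mbox and update its
 * enabled flag.
 */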
2425 nix_tm_node_suspend_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
2426                            struct rte_tm_error *error, bool suspend)
2427 {
2428         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2429         struct otx2_mbox *mbox = dev->mbox;
2430         struct otx2_nix_tm_node *tm_node;
2431         struct nix_txschq_config *req;
2432         uint16_t flags;
2433         int rc;
2434
2435         tm_node = nix_tm_node_search(dev, node_id, true);
2436         if (!tm_node) {
2437                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2438                 error->message = "no such node";
2439                 return -EINVAL;
2440         }
2441
2442         if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
2443                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2444                 error->message = "hierarchy doesn't exist";
2445                 return -EINVAL;
2446         }
2447
2448         flags = tm_node->flags;
2449         flags = suspend ? (flags & ~NIX_TM_NODE_ENABLED) :
2450                 (flags | NIX_TM_NODE_ENABLED);
2451
2452         if (tm_node->flags == flags)
2453                 return 0;
2454
2455         /* send mbox for state change */
2456         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2457
2458         req->lvl = tm_node->hw_lvl;
2459         req->num_regs = prepare_tm_sw_xoff(tm_node, suspend,
2460                                            req->reg, req->regval);
2461         rc = send_tm_reqval(mbox, req, error);
2462         if (!rc)
2463                 tm_node->flags = flags;
2464         return rc;
2465 }
2466
2467 static int
2468 otx2_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id,
2469                          struct rte_tm_error *error)
2470 {
2471         return nix_tm_node_suspend_resume(eth_dev, node_id, error, true);
2472 }
2473
2474 static int
2475 otx2_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
2476                         struct rte_tm_error *error)
2477 {
2478         return nix_tm_node_suspend_resume(eth_dev, node_id, error, false);
2479 }
2480
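/* rte_tm hierarchy_commit callback: verify every leaf node was added,
 * stop transmission, tear down the default/rate-limit tree and any
 * previously allocated resources, then allocate and enable the user
 * topology.
 */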
2481 static int
2482 otx2_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
2483                              int clear_on_fail,
2484                              struct rte_tm_error *error)
2485 {
2486         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2487         struct otx2_nix_tm_node *tm_node;
2488         uint32_t leaf_cnt = 0;
2489         int rc;
2490
2491         if (dev->tm_flags & NIX_TM_COMMITTED) {
2492                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2493                 error->message = "hierarchy exists";
2494                 return -EINVAL;
2495         }
2496
2497         /* Check if we have all the leaf nodes */
2498         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
2499                 if (tm_node->flags & NIX_TM_NODE_USER &&
2500                     tm_node->id < dev->tm_leaf_cnt)
2501                         leaf_cnt++;
2502         }
2503
2504         if (leaf_cnt != dev->tm_leaf_cnt) {
2505                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2506                 error->message = "incomplete hierarchy";
2507                 return -EINVAL;
2508         }
2509
2510         /*
2511          * Disable xmit; it will be re-enabled once the
2512          * new topology is in place.
2513          */
2514         rc = nix_xmit_disable(eth_dev);
2515         if (rc) {
2516                 otx2_err("failed to disable TX, rc=%d", rc);
2517                 return -EIO;
2518         }
2519
2520         /* Delete default/ratelimit tree */
2521         if (dev->tm_flags & (NIX_TM_DEFAULT_TREE | NIX_TM_RATE_LIMIT_TREE)) {
2522                 rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false);
2523                 if (rc) {
2524                         error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2525                         error->message = "failed to free default resources";
2526                         return rc;
2527                 }
2528                 dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE |
2529                                    NIX_TM_RATE_LIMIT_TREE);
2530         }
2531
2532         /* Free up user-allocated resources */
2533         rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER,
2534                                    NIX_TM_NODE_USER, true);
2535         if (rc) {
2536                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2537                 error->message = "failed to free user resources";
2538                 return rc;
2539         }
2540
2541         rc = nix_tm_alloc_resources(eth_dev, true);
2542         if (rc) {
2543                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2544                 error->message = "alloc resources failed";
2545                 /* TODO should we restore default config ? */
2546                 if (clear_on_fail)
2547                         nix_tm_free_resources(dev, 0, 0, false);
2548                 return rc;
2549         }
2550
2551         error->type = RTE_TM_ERROR_TYPE_NONE;
2552         dev->tm_flags |= NIX_TM_COMMITTED;
2553         return 0;
2554 }
2555
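/* rte_tm node_shaper_update callback: switch a committed node to a new
 * shaper profile by SW-XOFFing it, reprogramming the PIR/CIR and RED
 * algorithm, and clearing the XOFF again.
 */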
2556 static int
2557 otx2_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev,
2558                                uint32_t node_id,
2559                                uint32_t profile_id,
2560                                struct rte_tm_error *error)
2561 {
2562         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2563         struct otx2_nix_tm_shaper_profile *profile = NULL;
2564         struct otx2_mbox *mbox = dev->mbox;
2565         struct otx2_nix_tm_node *tm_node;
2566         struct nix_txschq_config *req;
2567         uint8_t k;
2568         int rc;
2569
2570         tm_node = nix_tm_node_search(dev, node_id, true);
2571         if (!tm_node || nix_tm_is_leaf(dev, tm_node->lvl)) {
2572                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2573                 error->message = "invalid node";
2574                 return -EINVAL;
2575         }
2576
2577         if (profile_id == tm_node->params.shaper_profile_id)
2578                 return 0;
2579
2580         if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
2581                 profile = nix_tm_shaper_profile_search(dev, profile_id);
2582                 if (!profile) {
2583                         error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2584                         error->message = "shaper profile ID does not exist";
2585                         return -EINVAL;
2586                 }
2587         }
2588
2589         if (profile && profile->params.packet_mode != tm_node->pkt_mode) {
2590                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2591                 error->message = "shaper profile pkt mode mismatch";
2592                 return -EINVAL;
2593         }
2594
2595         tm_node->params.shaper_profile_id = profile_id;
2596
2597         /* Nothing to do if not yet committed */
2598         if (!(dev->tm_flags & NIX_TM_COMMITTED))
2599                 return 0;
2600
2601         tm_node->flags &= ~NIX_TM_NODE_ENABLED;
2602
2603         /* Flush the specific node with SW_XOFF */
2604         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2605         req->lvl = tm_node->hw_lvl;
2606         k = prepare_tm_sw_xoff(tm_node, true, req->reg, req->regval);
2607         req->num_regs = k;
2608
2609         rc = send_tm_reqval(mbox, req, error);
2610         if (rc)
2611                 return rc;
2612
2613         shaper_default_red_algo(dev, tm_node, profile);
2614
2615         /* Update the PIR/CIR and clear SW XOFF */
2616         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2617         req->lvl = tm_node->hw_lvl;
2618
2619         k = prepare_tm_shaper_reg(tm_node, profile, req->reg, req->regval);
2620
2621         k += prepare_tm_sw_xoff(tm_node, false, &req->reg[k], &req->regval[k]);
2622
2623         req->num_regs = k;
2624         rc = send_tm_reqval(mbox, req, error);
2625         if (!rc)
2626                 tm_node->flags |= NIX_TM_NODE_ENABLED;
2627         return rc;
2628 }
2629
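/* rte_tm node_parent_update callback: only a dynamic weight change for
 * the same parent and priority is supported. Leaf nodes get their SQ
 * context patched directly; for non-leaf nodes the parent and all
 * siblings are SW-XOFFed, the new weight is written and everything is
 * XONed again.
 */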
2630 static int
2631 otx2_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev,
2632                                uint32_t node_id, uint32_t new_parent_id,
2633                                uint32_t priority, uint32_t weight,
2634                                struct rte_tm_error *error)
2635 {
2636         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2637         struct otx2_nix_tm_node *tm_node, *sibling;
2638         struct otx2_nix_tm_node *new_parent;
2639         struct nix_txschq_config *req;
2640         uint8_t k;
2641         int rc;
2642
2643         if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
2644                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2645                 error->message = "hierarchy doesn't exist";
2646                 return -EINVAL;
2647         }
2648
2649         tm_node = nix_tm_node_search(dev, node_id, true);
2650         if (!tm_node) {
2651                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2652                 error->message = "no such node";
2653                 return -EINVAL;
2654         }
2655
2656         /* Parent id is valid only for non-root nodes */
2657         if (tm_node->hw_lvl != dev->otx2_tm_root_lvl) {
2658                 new_parent = nix_tm_node_search(dev, new_parent_id, true);
2659                 if (!new_parent) {
2660                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2661                         error->message = "no such parent node";
2662                         return -EINVAL;
2663                 }
2664
2665                 /* Current support is only for dynamic weight update */
2666                 if (tm_node->parent != new_parent ||
2667                     tm_node->priority != priority) {
2668                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2669                         error->message = "only weight update supported";
2670                         return -EINVAL;
2671                 }
2672         }
2673
2674         /* Skip if no change */
2675         if (tm_node->weight == weight)
2676                 return 0;
2677
2678         tm_node->weight = weight;
2679
2680         /* For leaf nodes, SQ CTX needs update */
2681         if (nix_tm_is_leaf(dev, tm_node->lvl)) {
2682                 /* Update SQ quantum data on the fly */
2683                 rc = nix_sq_sched_data(dev, tm_node, true);
2684                 if (rc) {
2685                         error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2686                         error->message = "sq sched data update failed";
2687                         return rc;
2688                 }
2689         } else {
2690                 /* XOFF Parent node */
2691                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2692                 req->lvl = tm_node->parent->hw_lvl;
2693                 req->num_regs = prepare_tm_sw_xoff(tm_node->parent, true,
2694                                                    req->reg, req->regval);
2695                 rc = send_tm_reqval(dev->mbox, req, error);
2696                 if (rc)
2697                         return rc;
2698
2699                 /* XOFF this node and all other siblings */
2700                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2701                 req->lvl = tm_node->hw_lvl;
2702
2703                 k = 0;
2704                 TAILQ_FOREACH(sibling, &dev->node_list, node) {
2705                         if (sibling->parent != tm_node->parent)
2706                                 continue;
2707                         k += prepare_tm_sw_xoff(sibling, true, &req->reg[k],
2708                                                 &req->regval[k]);
2709                 }
2710                 req->num_regs = k;
2711                 rc = send_tm_reqval(dev->mbox, req, error);
2712                 if (rc)
2713                         return rc;
2714
2715                 /* Update new weight for current node */
2716                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2717                 req->lvl = tm_node->hw_lvl;
2718                 req->num_regs = prepare_tm_sched_reg(dev, tm_node,
2719                                                      req->reg, req->regval);
2720                 rc = send_tm_reqval(dev->mbox, req, error);
2721                 if (rc)
2722                         return rc;
2723
2724                 /* XON this node and all other siblings */
2725                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2726                 req->lvl = tm_node->hw_lvl;
2727
2728                 k = 0;
2729                 TAILQ_FOREACH(sibling, &dev->node_list, node) {
2730                         if (sibling->parent != tm_node->parent)
2731                                 continue;
2732                         k += prepare_tm_sw_xoff(sibling, false, &req->reg[k],
2733                                                 &req->regval[k]);
2734                 }
2735                 req->num_regs = k;
2736                 rc = send_tm_reqval(dev->mbox, req, error);
2737                 if (rc)
2738                         return rc;
2739
2740                 /* XON Parent node */
2741                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2742                 req->lvl = tm_node->parent->hw_lvl;
2743                 req->num_regs = prepare_tm_sw_xoff(tm_node->parent, false,
2744                                                    req->reg, req->regval);
2745                 rc = send_tm_reqval(dev->mbox, req, error);
2746                 if (rc)
2747                         return rc;
2748         }
2749         return 0;
2750 }
2751
2752 static int
2753 otx2_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
2754                             struct rte_tm_node_stats *stats,
2755                             uint64_t *stats_mask, int clear,
2756                             struct rte_tm_error *error)
2757 {
2758         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2759         struct otx2_nix_tm_node *tm_node;
2760         uint64_t reg, val;
2761         int64_t *addr;
2762         int rc = 0;
2763
2764         tm_node = nix_tm_node_search(dev, node_id, true);
2765         if (!tm_node) {
2766                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2767                 error->message = "no such node";
2768                 return -EINVAL;
2769         }
2770
2771         /* Stats are supported only for leaf nodes and the TL1 root */
2772         if (nix_tm_is_leaf(dev, tm_node->lvl)) {
2773                 reg = (((uint64_t)tm_node->id) << 32);
2774
2775                 /* Packets */
2776                 addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);
2777                 val = otx2_atomic64_add_nosync(reg, addr);
2778                 if (val & OP_ERR)
2779                         val = 0;
2780                 stats->n_pkts = val - tm_node->last_pkts;
2781
2782                 /* Bytes */
2783                 addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_OCTS);
2784                 val = otx2_atomic64_add_nosync(reg, addr);
2785                 if (val & OP_ERR)
2786                         val = 0;
2787                 stats->n_bytes = val - tm_node->last_bytes;
2788
2789                 if (clear) {
2790                         tm_node->last_pkts = stats->n_pkts;
2791                         tm_node->last_bytes = stats->n_bytes;
2792                 }
2793
2794                 *stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
2795
2796         } else if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1) {
2797                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2798                 error->message = "stats read error";
2799
2800                 /* RED Drop packets */
2801                 reg = NIX_AF_TL1X_DROPPED_PACKETS(tm_node->hw_id);
2802                 rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
2803                 if (rc)
2804                         goto exit;
2805                 stats->leaf.n_pkts_dropped[RTE_COLOR_RED] =
2806                                                 val - tm_node->last_pkts;
2807
2808                 /* RED Drop bytes */
2809                 reg = NIX_AF_TL1X_DROPPED_BYTES(tm_node->hw_id);
2810                 rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
2811                 if (rc)
2812                         goto exit;
2813                 stats->leaf.n_bytes_dropped[RTE_COLOR_RED] =
2814                                                 val - tm_node->last_bytes;
2815
2816                 /* Clear stats */
2817                 if (clear) {
2818                         tm_node->last_pkts =
2819                                 stats->leaf.n_pkts_dropped[RTE_COLOR_RED];
2820                         tm_node->last_bytes =
2821                                 stats->leaf.n_bytes_dropped[RTE_COLOR_RED];
2822                 }
2823
2824                 *stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
2825                         RTE_TM_STATS_N_BYTES_RED_DROPPED;
2826
2827         } else {
2828                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2829                 error->message = "unsupported node";
2830                 rc = -EINVAL;
2831         }
2832
2833 exit:
2834         return rc;
2835 }
2836
2837 const struct rte_tm_ops otx2_tm_ops = {
2838         .node_type_get = otx2_nix_tm_node_type_get,
2839
2840         .capabilities_get = otx2_nix_tm_capa_get,
2841         .level_capabilities_get = otx2_nix_tm_level_capa_get,
2842         .node_capabilities_get = otx2_nix_tm_node_capa_get,
2843
2844         .shaper_profile_add = otx2_nix_tm_shaper_profile_add,
2845         .shaper_profile_delete = otx2_nix_tm_shaper_profile_delete,
2846
2847         .node_add = otx2_nix_tm_node_add,
2848         .node_delete = otx2_nix_tm_node_delete,
2849         .node_suspend = otx2_nix_tm_node_suspend,
2850         .node_resume = otx2_nix_tm_node_resume,
2851         .hierarchy_commit = otx2_nix_tm_hierarchy_commit,
2852
2853         .node_shaper_update = otx2_nix_tm_node_shaper_update,
2854         .node_parent_update = otx2_nix_tm_node_parent_update,
2855         .node_stats_read = otx2_nix_tm_node_stats_read,
2856 };
2857
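/* Build the default tree used before any user hierarchy is committed:
 * a single chain of nodes from the root (TL1 or TL2) down to SMQ, with
 * every TX queue attached as a leaf under the last non-leaf node.
 */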
2858 static int
2859 nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)
2860 {
2861         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2862         uint32_t def = eth_dev->data->nb_tx_queues;
2863         struct rte_tm_node_params params;
2864         uint32_t leaf_parent, i;
2865         int rc = 0, leaf_level;
2866
2867         /* Default params */
2868         memset(&params, 0, sizeof(params));
2869         params.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
2870
2871         if (nix_tm_have_tl1_access(dev)) {
2872                 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
2873                 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2874                                              DEFAULT_RR_WEIGHT,
2875                                              NIX_TXSCH_LVL_TL1,
2876                                              OTX2_TM_LVL_ROOT, false, &params);
2877                 if (rc)
2878                         goto exit;
2879                 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2880                                              DEFAULT_RR_WEIGHT,
2881                                              NIX_TXSCH_LVL_TL2,
2882                                              OTX2_TM_LVL_SCH1, false, &params);
2883                 if (rc)
2884                         goto exit;
2885
2886                 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2887                                              DEFAULT_RR_WEIGHT,
2888                                              NIX_TXSCH_LVL_TL3,
2889                                              OTX2_TM_LVL_SCH2, false, &params);
2890                 if (rc)
2891                         goto exit;
2892
2893                 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2894                                              DEFAULT_RR_WEIGHT,
2895                                              NIX_TXSCH_LVL_TL4,
2896                                              OTX2_TM_LVL_SCH3, false, &params);
2897                 if (rc)
2898                         goto exit;
2899
2900                 rc = nix_tm_node_add_to_list(dev, def + 4, def + 3, 0,
2901                                              DEFAULT_RR_WEIGHT,
2902                                              NIX_TXSCH_LVL_SMQ,
2903                                              OTX2_TM_LVL_SCH4, false, &params);
2904                 if (rc)
2905                         goto exit;
2906
2907                 leaf_parent = def + 4;
2908                 leaf_level = OTX2_TM_LVL_QUEUE;
2909         } else {
2910                 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
2911                 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2912                                              DEFAULT_RR_WEIGHT,
2913                                              NIX_TXSCH_LVL_TL2,
2914                                              OTX2_TM_LVL_ROOT, false, &params);
2915                 if (rc)
2916                         goto exit;
2917
2918                 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2919                                              DEFAULT_RR_WEIGHT,
2920                                              NIX_TXSCH_LVL_TL3,
2921                                              OTX2_TM_LVL_SCH1, false, &params);
2922                 if (rc)
2923                         goto exit;
2924
2925                 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2926                                              DEFAULT_RR_WEIGHT,
2927                                              NIX_TXSCH_LVL_TL4,
2928                                              OTX2_TM_LVL_SCH2, false, &params);
2929                 if (rc)
2930                         goto exit;
2931
2932                 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2933                                              DEFAULT_RR_WEIGHT,
2934                                              NIX_TXSCH_LVL_SMQ,
2935                                              OTX2_TM_LVL_SCH3, false, &params);
2936                 if (rc)
2937                         goto exit;
2938
2939                 leaf_parent = def + 3;
2940                 leaf_level = OTX2_TM_LVL_SCH4;
2941         }
2942
2943         /* Add leaf nodes */
2944         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2945                 rc = nix_tm_node_add_to_list(dev, i, leaf_parent, 0,
2946                                              DEFAULT_RR_WEIGHT,
2947                                              NIX_TXSCH_LVL_CNT,
2948                                              leaf_level, false, &params);
2949                 if (rc)
2950                         break;
2951         }
2952
2953 exit:
2954         return rc;
2955 }
2956
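/* Early TM state init: empty node/shaper-profile lists, 1Gbps rate minimum. */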
2957 void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev)
2958 {
2959         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2960
2961         TAILQ_INIT(&dev->node_list);
2962         TAILQ_INIT(&dev->shaper_profile_list);
2963         dev->tm_rate_min = 1E9; /* 1Gbps */
2964 }
2965
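/*
 * (Re)build and commit the default TM hierarchy for the current Tx queue
 * count: free any scheduler resources already held, clear stale shaper
 * profiles, construct the default tree and allocate hardware resources
 * for it.
 */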
2966 int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
2967 {
2968         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2969         struct otx2_eth_dev  *dev = otx2_eth_pmd_priv(eth_dev);
2970         uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
2971         int rc;
2972
2973         /* Free up all resources already held */
2974         rc = nix_tm_free_resources(dev, 0, 0, false);
2975         if (rc) {
2976                 otx2_err("Failed to free up existing resources, rc=%d", rc);
2977                 return rc;
2978         }
2979
2980         /* Clear shaper profiles */
2981         nix_tm_clear_shaper_profiles(dev);
2982         dev->tm_flags = NIX_TM_DEFAULT_TREE;
2983
2984         /* Disable TL1 static priority when VFs are enabled,
2985          * as otherwise the VFs' TL2 nodes would need to be
2986          * reallocated at runtime to support a specific PF topology.
2987          */
2988         if (pci_dev->max_vfs)
2989                 dev->tm_flags |= NIX_TM_TL1_NO_SP;
2990
2991         rc = nix_tm_prepare_default_tree(eth_dev);
2992         if (rc != 0)
2993                 return rc;
2994
2995         rc = nix_tm_alloc_resources(eth_dev, false);
2996         if (rc != 0)
2997                 return rc;
2998         dev->tm_leaf_cnt = sq_cnt;
2999
3000         return 0;
3001 }
3002
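/*
 * Build the rate-limit friendly hierarchy: the usual chain of scheduler
 * levels down to the last common node, then one SMQ node per Tx queue so
 * that each queue can later be shaped independently at its own SMQ/MDQ.
 */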
3003 static int
3004 nix_tm_prepare_rate_limited_tree(struct rte_eth_dev *eth_dev)
3005 {
3006         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3007         uint32_t def = eth_dev->data->nb_tx_queues;
3008         struct rte_tm_node_params params;
3009         uint32_t leaf_parent, i;
             int rc = 0;
3010
3011         memset(&params, 0, sizeof(params));
3012
3013         if (nix_tm_have_tl1_access(dev)) {
3014                 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
3015                 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
3016                                         DEFAULT_RR_WEIGHT,
3017                                         NIX_TXSCH_LVL_TL1,
3018                                         OTX2_TM_LVL_ROOT, false, &params);
3019                 if (rc)
3020                         goto error;
3021                 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
3022                                         DEFAULT_RR_WEIGHT,
3023                                         NIX_TXSCH_LVL_TL2,
3024                                         OTX2_TM_LVL_SCH1, false, &params);
3025                 if (rc)
3026                         goto error;
3027                 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
3028                                         DEFAULT_RR_WEIGHT,
3029                                         NIX_TXSCH_LVL_TL3,
3030                                         OTX2_TM_LVL_SCH2, false, &params);
3031                 if (rc)
3032                         goto error;
3033                 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
3034                                         DEFAULT_RR_WEIGHT,
3035                                         NIX_TXSCH_LVL_TL4,
3036                                         OTX2_TM_LVL_SCH3, false, &params);
3037                 if (rc)
3038                         goto error;
3039                 leaf_parent = def + 3;
3040
3041                 /* Add per queue SMQ nodes */
3042                 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
3043                         rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
3044                                                 leaf_parent,
3045                                                 0, DEFAULT_RR_WEIGHT,
3046                                                 NIX_TXSCH_LVL_SMQ,
3047                                                 OTX2_TM_LVL_SCH4,
3048                                                 false, &params);
3049                         if (rc)
3050                                 goto error;
3051                 }
3052
3053                 /* Add leaf nodes */
3054                 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
3055                         rc = nix_tm_node_add_to_list(dev, i,
3056                                                      leaf_parent + 1 + i, 0,
3057                                                      DEFAULT_RR_WEIGHT,
3058                                                      NIX_TXSCH_LVL_CNT,
3059                                                      OTX2_TM_LVL_QUEUE,
3060                                                      false, &params);
3061                         if (rc)
3062                                 goto error;
3063                 }
3064
3065                 return 0;
3066         }
3067
3068         dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
3069         rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
3070                                 DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL2,
3071                                 OTX2_TM_LVL_ROOT, false, &params);
3072         if (rc)
3073                 goto error;
3074         rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
3075                                 DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL3,
3076                                 OTX2_TM_LVL_SCH1, false, &params);
3077         if (rc)
3078                 goto error;
3079         rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
3080                                      DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL4,
3081                                      OTX2_TM_LVL_SCH2, false, &params);
3082         if (rc)
3083                 goto error;
3084         leaf_parent = def + 2;
3085
3086         /* Add per queue SMQ nodes */
3087         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
3088                 rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
3089                                              leaf_parent,
3090                                              0, DEFAULT_RR_WEIGHT,
3091                                              NIX_TXSCH_LVL_SMQ,
3092                                              OTX2_TM_LVL_SCH3,
3093                                              false, &params);
3094                 if (rc)
3095                         goto error;
3096         }
3097
3098         /* Add leaf nodes */
3099         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
3100                 rc = nix_tm_node_add_to_list(dev, i, leaf_parent + 1 + i, 0,
3101                                              DEFAULT_RR_WEIGHT,
3102                                              NIX_TXSCH_LVL_CNT,
3103                                              OTX2_TM_LVL_SCH4,
3104                                              false, &params);
3105                 if (rc)
3106                         break;
3107         }
3108 error:
3109         return rc;
3110 }
3111
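/*
 * Program the MDQ-level registers of the given SMQ node: tx_rate == 0
 * asserts software XOFF and marks the node disabled; a non-zero rate
 * re-enables the node if needed and installs a PIR-only shaper with a
 * burst of at least one maximum-sized frame.
 */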
3112 static int
3113 otx2_nix_tm_rate_limit_mdq(struct rte_eth_dev *eth_dev,
3114                            struct otx2_nix_tm_node *tm_node,
3115                            uint64_t tx_rate)
3116 {
3117         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3118         struct otx2_nix_tm_shaper_profile profile;
3119         struct otx2_mbox *mbox = dev->mbox;
3120         volatile uint64_t *reg, *regval;
3121         struct nix_txschq_config *req;
3122         uint16_t flags;
3123         uint8_t k = 0;
3124         int rc;
3125
3126         flags = tm_node->flags;
3127
3128         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
3129         req->lvl = NIX_TXSCH_LVL_MDQ;
3130         reg = req->reg;
3131         regval = req->regval;
3132
3133         if (tx_rate == 0) {
3134                 k += prepare_tm_sw_xoff(tm_node, true, &reg[k], &regval[k]);
3135                 flags &= ~NIX_TM_NODE_ENABLED;
3136                 goto exit;
3137         }
3138
3139         if (!(flags & NIX_TM_NODE_ENABLED)) {
3140                 k += prepare_tm_sw_xoff(tm_node, false, &reg[k], &regval[k]);
3141                 flags |= NIX_TM_NODE_ENABLED;
3142         }
3143
3144         /* Use only PIR for rate limit */
3145         memset(&profile, 0, sizeof(profile));
3146         profile.params.peak.rate = tx_rate;
3147         /* Minimum burst: one max-sized frame, or ~4us worth of bytes at tx_rate */
3148         profile.params.peak.size = RTE_MAX(NIX_MAX_HW_FRS,
3149                                            (4ull * tx_rate) / (1E6 * 8));
3150         if (!dev->tm_rate_min || dev->tm_rate_min > tx_rate)
3151                 dev->tm_rate_min = tx_rate;
3152
3153         k += prepare_tm_shaper_reg(tm_node, &profile, &reg[k], &regval[k]);
3154 exit:
3155         req->num_regs = k;
3156         rc = otx2_mbox_process(mbox);
3157         if (rc)
3158                 return rc;
3159
3160         tm_node->flags = flags;
3161         return 0;
3162 }
3163
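/*
 * Apply a per-queue Tx rate limit (tx_rate_mbps in Mbps). If the default
 * tree is in use with more than one Tx queue, the hierarchy is first
 * rebuilt as the rate-limited tree (one SMQ per queue); the rate is then
 * programmed on the queue's parent MDQ.
 */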
3164 int
3165 otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
3166                                 uint16_t queue_idx, uint16_t tx_rate_mbps)
3167 {
3168         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3169         uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
3170         struct otx2_nix_tm_node *tm_node;
3171         int rc;
3172
3173         /* Check for supported revisions */
3174         if (otx2_dev_is_95xx_Ax(dev) ||
3175             otx2_dev_is_96xx_Ax(dev))
3176                 return -EINVAL;
3177
3178         if (queue_idx >= eth_dev->data->nb_tx_queues)
3179                 return -EINVAL;
3180
3181         if (!(dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
3182             !(dev->tm_flags & NIX_TM_RATE_LIMIT_TREE))
3183                 goto error;
3184
3185         if ((dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
3186             eth_dev->data->nb_tx_queues > 1) {
3187                 /* For TM topology change ethdev needs to be stopped */
3188                 if (eth_dev->data->dev_started)
3189                         return -EBUSY;
3190
3191                 /*
3192                  * Disable transmit; it is re-enabled once the
3193                  * new topology is in place.
3194                  */
3195                 rc = nix_xmit_disable(eth_dev);
3196                 if (rc) {
3197                         otx2_err("failed to disable TX, rc=%d", rc);
3198                         return -EIO;
3199                 }
3200
3201                 rc = nix_tm_free_resources(dev, 0, 0, false);
3202                 if (rc < 0) {
3203                         otx2_tm_dbg("failed to free default resources, rc %d",
3204                                    rc);
3205                         return -EIO;
3206                 }
3207
3208                 rc = nix_tm_prepare_rate_limited_tree(eth_dev);
3209                 if (rc < 0) {
3210                         otx2_tm_dbg("failed to prepare tm tree, rc=%d", rc);
3211                         return rc;
3212                 }
3213
3214                 rc = nix_tm_alloc_resources(eth_dev, true);
3215                 if (rc != 0) {
3216                         otx2_tm_dbg("failed to allocate tm tree, rc=%d", rc);
3217                         return rc;
3218                 }
3219
3220                 dev->tm_flags &= ~NIX_TM_DEFAULT_TREE;
3221                 dev->tm_flags |= NIX_TM_RATE_LIMIT_TREE;
3222         }
3223
3224         tm_node = nix_tm_node_search(dev, queue_idx, false);
3225
3226         /* check if we found a valid leaf node */
3227         if (!tm_node ||
3228             !nix_tm_is_leaf(dev, tm_node->lvl) ||
3229             !tm_node->parent ||
3230             tm_node->parent->hw_id == UINT32_MAX)
3231                 return -EIO;
3232
3233         return otx2_nix_tm_rate_limit_mdq(eth_dev, tm_node->parent, tx_rate);
3234 error:
3235         otx2_tm_dbg("Unsupported TM tree 0x%x", dev->tm_flags);
3236         return -EINVAL;
3237 }
3238
3239 int
3240 otx2_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *arg)
3241 {
3242         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3243
3244         if (!arg)
3245                 return -EINVAL;
3246
3247         /* Check for supported revisions */
3248         if (otx2_dev_is_95xx_Ax(dev) ||
3249             otx2_dev_is_96xx_Ax(dev))
3250                 return -EINVAL;
3251
3252         *(const void **)arg = &otx2_tm_ops;
3253
3254         return 0;
3255 }
3256
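/* Tear down TM state: free scheduler resources and shaper profiles. */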
3257 int
3258 otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
3259 {
3260         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3261         int rc;
3262
3263         /* Xmit is assumed to be disabled */
3264         /* Free up resources already held */
3265         rc = nix_tm_free_resources(dev, 0, 0, false);
3266         if (rc) {
3267                 otx2_err("Failed to free up existing resources, rc=%d", rc);
3268                 return rc;
3269         }
3270
3271         /* Clear shaper profiles */
3272         nix_tm_clear_shaper_profiles(dev);
3273
3274         dev->tm_flags = 0;
3275         return 0;
3276 }
3277
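/*
 * Return the SMQ id and RR quantum for the leaf node backing the given SQ,
 * and release software XOFF on the parent SMQ so the queue can transmit.
 */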
3278 int
3279 otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
3280                           uint32_t *rr_quantum, uint16_t *smq)
3281 {
3282         struct otx2_nix_tm_node *tm_node;
3283         int rc;
3284
3285         /* 0..sq_cnt-1 are leaf nodes */
3286         if (sq >= dev->tm_leaf_cnt)
3287                 return -EINVAL;
3288
3289         /* Search for internal node first */
3290         tm_node = nix_tm_node_search(dev, sq, false);
3291         if (!tm_node)
3292                 tm_node = nix_tm_node_search(dev, sq, true);
3293
3294         /* Check if we found a valid leaf node */
3295         if (!tm_node || !nix_tm_is_leaf(dev, tm_node->lvl) ||
3296             !tm_node->parent || tm_node->parent->hw_id == UINT32_MAX) {
3297                 return -EIO;
3298         }
3299
3300         /* Get SMQ Id of leaf node's parent */
3301         *smq = tm_node->parent->hw_id;
3302         *rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
3303
3304         rc = nix_smq_xoff(dev, tm_node->parent, false);
3305         if (rc)
3306                 return rc;
3307         tm_node->flags |= NIX_TM_NODE_ENABLED;
3308
3309         return 0;
3310 }