net/octeontx2: add TM capability
[dpdk.git] / drivers / net / octeontx2 / otx2_tm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_malloc.h>

#include "otx2_ethdev.h"
#include "otx2_tm.h"

/* Use last LVL_CNT nodes as default nodes */
#define NIX_DEFAULT_NODE_ID_START (RTE_TM_NODE_ID_NULL - NIX_TXSCH_LVL_CNT)

enum otx2_tm_node_level {
        OTX2_TM_LVL_ROOT = 0,
        OTX2_TM_LVL_SCH1,
        OTX2_TM_LVL_SCH2,
        OTX2_TM_LVL_SCH3,
        OTX2_TM_LVL_SCH4,
        OTX2_TM_LVL_QUEUE,
        OTX2_TM_LVL_MAX,
};

static inline
uint64_t shaper2regval(struct shaper_params *shaper)
{
        return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
                (shaper->div_exp << 13) | (shaper->exponent << 9) |
                (shaper->mantissa << 1);
}
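
/*
 * Illustrative sketch of the register layout implied by the shifts above
 * (field widths are assumptions derived from the MAX_* limits, not from
 * the HRM):
 *
 *   bit  0       : rate enable (OR'ed in by callers as "| 1")
 *   bits [8:1]   : rate mantissa
 *   bits [12:9]  : rate exponent
 *   bits [16:13] : rate divider exponent
 *   bits [36:29] : burst mantissa
 *   bits [40:37] : burst exponent
 *
 * E.g. mantissa=244, exponent=8, div_exp=0, burst_mantissa=0 and
 * burst_exponent=11 packs to (11ull << 37) | (8 << 9) | (244 << 1).
 */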

int
otx2_nix_get_link(struct otx2_eth_dev *dev)
{
        int link = 13 /* SDP */;
        uint16_t lmac_chan;
        uint16_t map;

        lmac_chan = dev->tx_chan_base;

        /* CGX lmac link */
        if (lmac_chan >= 0x800) {
                map = lmac_chan & 0x7FF;
                link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
        } else if (lmac_chan < 0x700) {
                /* LBK channel */
                link = 12;
        }

        return link;
}
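
/*
 * Worked example of the decode above (per this code, bits [10:8] of the
 * channel select the CGX and bits [7:4] the LMAC, four links per CGX):
 *
 *   tx_chan_base = 0x920 -> map = 0x120 -> CGX 1, LMAC 2 -> link = 4 * 1 + 2 = 6
 *
 * Channels in 0x700-0x7FF fall through to the SDP default (link 13).
 */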

static uint8_t
nix_get_relchan(struct otx2_eth_dev *dev)
{
        return dev->tx_chan_base & 0xff;
}

static bool
nix_tm_have_tl1_access(struct otx2_eth_dev *dev)
{
        bool is_lbk = otx2_dev_is_lbk(dev);
        return otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) && !is_lbk;
}

static bool
nix_tm_is_leaf(struct otx2_eth_dev *dev, int lvl)
{
        if (nix_tm_have_tl1_access(dev))
                return (lvl == OTX2_TM_LVL_QUEUE);

        return (lvl == OTX2_TM_LVL_SCH4);
}

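/*
 * Children with distinct static priorities get contiguous HW ids in
 * priority order (see nix_tm_assign_id_to_node()), so for any such child
 * hw_id - priority is the first id of the contiguous block, i.e. the
 * PRIO_ANCHOR. Children sharing the parent's rr_prio come from the
 * non-contiguous pool and are skipped.
 */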
static int
find_prio_anchor(struct otx2_eth_dev *dev, uint32_t node_id)
{
        struct otx2_nix_tm_node *child_node;

        TAILQ_FOREACH(child_node, &dev->node_list, node) {
                if (!child_node->parent)
                        continue;
                if (child_node->parent->id != node_id)
                        continue;
                if (child_node->priority == child_node->parent->rr_prio)
                        continue;
                return child_node->hw_id - child_node->priority;
        }
        return 0;
}

static struct otx2_nix_tm_shaper_profile *
nix_tm_shaper_profile_search(struct otx2_eth_dev *dev, uint32_t shaper_id)
{
        struct otx2_nix_tm_shaper_profile *tm_shaper_profile;

        TAILQ_FOREACH(tm_shaper_profile, &dev->shaper_profile_list, shaper) {
                if (tm_shaper_profile->shaper_profile_id == shaper_id)
                        return tm_shaper_profile;
        }
        return NULL;
}

static inline uint64_t
shaper_rate_to_nix(uint64_t value, uint64_t *exponent_p,
                   uint64_t *mantissa_p, uint64_t *div_exp_p)
{
        uint64_t div_exp, exponent, mantissa;

        /* Boundary checks */
        if (value < MIN_SHAPER_RATE ||
            value > MAX_SHAPER_RATE)
                return 0;

        if (value <= SHAPER_RATE(0, 0, 0)) {
                /* Calculate rate div_exp and mantissa using
                 * the following formula:
                 *
                 * value = (NIX_SHAPER_RATE_CONST * (256 + mantissa)
                 *              / ((1 << div_exp) * 256))
                 */
                div_exp = 0;
                exponent = 0;
                mantissa = MAX_RATE_MANTISSA;

                while (value < (NIX_SHAPER_RATE_CONST / (1 << div_exp)))
                        div_exp += 1;

                while (value <
                       ((NIX_SHAPER_RATE_CONST * (256 + mantissa)) /
                        ((1 << div_exp) * 256)))
                        mantissa -= 1;
        } else {
                /* Calculate rate exponent and mantissa using
                 * the following formula:
                 *
                 * value = (NIX_SHAPER_RATE_CONST *
                 *          ((256 + mantissa) << exponent)) / 256
                 */
                div_exp = 0;
                exponent = MAX_RATE_EXPONENT;
                mantissa = MAX_RATE_MANTISSA;

                while (value < (NIX_SHAPER_RATE_CONST * (1 << exponent)))
                        exponent -= 1;

                while (value < ((NIX_SHAPER_RATE_CONST *
                                ((256 + mantissa) << exponent)) / 256))
                        mantissa -= 1;
        }

        if (div_exp > MAX_RATE_DIV_EXP ||
            exponent > MAX_RATE_EXPONENT || mantissa > MAX_RATE_MANTISSA)
                return 0;

        if (div_exp_p)
                *div_exp_p = div_exp;
        if (exponent_p)
                *exponent_p = exponent;
        if (mantissa_p)
                *mantissa_p = mantissa;

        /* Calculate real rate value */
        return SHAPER_RATE(exponent, mantissa, div_exp);
}
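
/*
 * Worked example (assuming NIX_SHAPER_RATE_CONST is 2E6, per the formulas
 * above): for value = 1000000000 (1 Gbps) the loops settle on exponent = 8,
 * mantissa = 244 and div_exp = 0, and the rate is reproduced exactly:
 *
 *   SHAPER_RATE(8, 244, 0) = 2E6 * ((256 + 244) << 8) / 256 = 1000000000
 */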

static inline uint64_t
shaper_burst_to_nix(uint64_t value, uint64_t *exponent_p,
                    uint64_t *mantissa_p)
{
        uint64_t exponent, mantissa;

        if (value < MIN_SHAPER_BURST || value > MAX_SHAPER_BURST)
                return 0;

        /* Calculate burst exponent and mantissa using
         * the following formula:
         *
         * value = ((256 + mantissa) << (exponent + 1)) / 256
         */
        exponent = MAX_BURST_EXPONENT;
        mantissa = MAX_BURST_MANTISSA;

        while (value < (1ull << (exponent + 1)))
                exponent -= 1;

        while (value < ((256 + mantissa) << (exponent + 1)) / 256)
                mantissa -= 1;

        if (exponent > MAX_BURST_EXPONENT || mantissa > MAX_BURST_MANTISSA)
                return 0;

        if (exponent_p)
                *exponent_p = exponent;
        if (mantissa_p)
                *mantissa_p = mantissa;

        return SHAPER_BURST(exponent, mantissa);
}
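
/*
 * Worked example for the formula above: for value = 4096 bytes the first
 * loop stops at exponent = 11 (1 << 12 == 4096) and the second at
 * mantissa = 0, so SHAPER_BURST(11, 0) = ((256 + 0) << 12) / 256 = 4096,
 * an exact encoding.
 */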

static void
shaper_config_to_nix(struct otx2_nix_tm_shaper_profile *profile,
                     struct shaper_params *cir,
                     struct shaper_params *pir)
{
        struct rte_tm_shaper_params *param;

        if (!profile)
                return;
        param = &profile->params;

        /* Calculate CIR exponent and mantissa */
        if (param->committed.rate)
                cir->rate = shaper_rate_to_nix(param->committed.rate,
                                               &cir->exponent,
                                               &cir->mantissa,
                                               &cir->div_exp);

        /* Calculate PIR exponent and mantissa */
        if (param->peak.rate)
                pir->rate = shaper_rate_to_nix(param->peak.rate,
                                               &pir->exponent,
                                               &pir->mantissa,
                                               &pir->div_exp);

        /* Calculate CIR burst exponent and mantissa */
        if (param->committed.size)
                cir->burst = shaper_burst_to_nix(param->committed.size,
                                                 &cir->burst_exponent,
                                                 &cir->burst_mantissa);

        /* Calculate PIR burst exponent and mantissa */
        if (param->peak.size)
                pir->burst = shaper_burst_to_nix(param->peak.size,
                                                 &pir->burst_exponent,
                                                 &pir->burst_mantissa);
}

static int
populate_tm_tl1_default(struct otx2_eth_dev *dev, uint32_t schq)
{
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_txschq_config *req;

        /*
         * Default config for TL1.
         * For VF this is always ignored.
         */

        req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
        req->lvl = NIX_TXSCH_LVL_TL1;

        /* Set DWRR quantum */
        req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
        req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
        req->num_regs++;

        req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
        req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
        req->num_regs++;

        req->reg[2] = NIX_AF_TL1X_CIR(schq);
        req->regval[2] = 0;
        req->num_regs++;

        return otx2_mbox_process(mbox);
}

static uint8_t
prepare_tm_sched_reg(struct otx2_eth_dev *dev,
                     struct otx2_nix_tm_node *tm_node,
                     volatile uint64_t *reg, volatile uint64_t *regval)
{
        uint64_t strict_prio = tm_node->priority;
        uint32_t hw_lvl = tm_node->hw_lvl;
        uint32_t schq = tm_node->hw_id;
        uint64_t rr_quantum;
        uint8_t k = 0;

        rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);

        /* For children of the root, strict priority is the default when
         * either the device root is TL2 or TL1 static priority is disabled.
         */
        if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
            (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
             dev->tm_flags & NIX_TM_TL1_NO_SP))
                strict_prio = TXSCH_TL1_DFLT_RR_PRIO;

        otx2_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
                    "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
                    nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
                    tm_node->id, strict_prio, rr_quantum, tm_node);

        switch (hw_lvl) {
        case NIX_TXSCH_LVL_SMQ:
                reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
                regval[k] = (strict_prio << 24) | rr_quantum;
                k++;

                break;
        case NIX_TXSCH_LVL_TL4:
                reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
                regval[k] = (strict_prio << 24) | rr_quantum;
                k++;

                break;
        case NIX_TXSCH_LVL_TL3:
                reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
                regval[k] = (strict_prio << 24) | rr_quantum;
                k++;

                break;
        case NIX_TXSCH_LVL_TL2:
                reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
                regval[k] = (strict_prio << 24) | rr_quantum;
                k++;

                break;
        case NIX_TXSCH_LVL_TL1:
                reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
                regval[k] = rr_quantum;
                k++;

                break;
        }

        return k;
}
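
/*
 * Layout of the *_SCHEDULE value written above, as used in this file:
 * bits [23:0] carry the DWRR rr_quantum and bits starting at 24 the strict
 * priority; TL1 takes only the quantum since its priority is fixed.
 */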

static uint8_t
prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
                      struct otx2_nix_tm_shaper_profile *profile,
                      volatile uint64_t *reg, volatile uint64_t *regval)
{
        struct shaper_params cir, pir;
        uint32_t schq = tm_node->hw_id;
        uint8_t k = 0;

        memset(&cir, 0, sizeof(cir));
        memset(&pir, 0, sizeof(pir));
        shaper_config_to_nix(profile, &cir, &pir);

        otx2_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
                    "pir %" PRIu64 "(%" PRIu64 "B),"
                    " cir %" PRIu64 "(%" PRIu64 "B) (%p)",
                    nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
                    tm_node->id, pir.rate, pir.burst,
                    cir.rate, cir.burst, tm_node);

        switch (tm_node->hw_lvl) {
        case NIX_TXSCH_LVL_SMQ:
                /* Configure PIR, CIR */
                reg[k] = NIX_AF_MDQX_PIR(schq);
                regval[k] = (pir.rate && pir.burst) ?
                                (shaper2regval(&pir) | 1) : 0;
                k++;

                reg[k] = NIX_AF_MDQX_CIR(schq);
                regval[k] = (cir.rate && cir.burst) ?
                                (shaper2regval(&cir) | 1) : 0;
                k++;

                /* Configure RED algo */
                reg[k] = NIX_AF_MDQX_SHAPE(schq);
                regval[k] = ((uint64_t)tm_node->red_algo << 9);
                k++;
                break;
        case NIX_TXSCH_LVL_TL4:
                /* Configure PIR, CIR */
                reg[k] = NIX_AF_TL4X_PIR(schq);
                regval[k] = (pir.rate && pir.burst) ?
                                (shaper2regval(&pir) | 1) : 0;
                k++;

                reg[k] = NIX_AF_TL4X_CIR(schq);
                regval[k] = (cir.rate && cir.burst) ?
                                (shaper2regval(&cir) | 1) : 0;
                k++;

                /* Configure RED algo */
                reg[k] = NIX_AF_TL4X_SHAPE(schq);
                regval[k] = ((uint64_t)tm_node->red_algo << 9);
                k++;
                break;
        case NIX_TXSCH_LVL_TL3:
                /* Configure PIR, CIR */
                reg[k] = NIX_AF_TL3X_PIR(schq);
                regval[k] = (pir.rate && pir.burst) ?
                                (shaper2regval(&pir) | 1) : 0;
                k++;

                reg[k] = NIX_AF_TL3X_CIR(schq);
                regval[k] = (cir.rate && cir.burst) ?
                                (shaper2regval(&cir) | 1) : 0;
                k++;

                /* Configure RED algo */
                reg[k] = NIX_AF_TL3X_SHAPE(schq);
                regval[k] = ((uint64_t)tm_node->red_algo << 9);
                k++;

                break;
        case NIX_TXSCH_LVL_TL2:
                /* Configure PIR, CIR */
                reg[k] = NIX_AF_TL2X_PIR(schq);
                regval[k] = (pir.rate && pir.burst) ?
                                (shaper2regval(&pir) | 1) : 0;
                k++;

                reg[k] = NIX_AF_TL2X_CIR(schq);
                regval[k] = (cir.rate && cir.burst) ?
                                (shaper2regval(&cir) | 1) : 0;
                k++;

                /* Configure RED algo */
                reg[k] = NIX_AF_TL2X_SHAPE(schq);
                regval[k] = ((uint64_t)tm_node->red_algo << 9);
                k++;

                break;
        case NIX_TXSCH_LVL_TL1:
                /* Configure CIR */
                reg[k] = NIX_AF_TL1X_CIR(schq);
                regval[k] = (cir.rate && cir.burst) ?
                                (shaper2regval(&cir) | 1) : 0;
                k++;
                break;
        }

        return k;
}

static uint8_t
prepare_tm_sw_xoff(struct otx2_nix_tm_node *tm_node, bool enable,
                   volatile uint64_t *reg, volatile uint64_t *regval)
{
        uint32_t hw_lvl = tm_node->hw_lvl;
        uint32_t schq = tm_node->hw_id;
        uint8_t k = 0;

        otx2_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)",
                    nix_hwlvl2str(hw_lvl), schq, tm_node->lvl,
                    tm_node->id, enable, tm_node);

        regval[k] = enable;

        switch (hw_lvl) {
        case NIX_TXSCH_LVL_MDQ:
                reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
                k++;
                break;
        case NIX_TXSCH_LVL_TL4:
                reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
                k++;
                break;
        case NIX_TXSCH_LVL_TL3:
                reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
                k++;
                break;
        case NIX_TXSCH_LVL_TL2:
                reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
                k++;
                break;
        case NIX_TXSCH_LVL_TL1:
                reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
                k++;
                break;
        default:
                break;
        }

        return k;
}

static int
populate_tm_reg(struct otx2_eth_dev *dev,
                struct otx2_nix_tm_node *tm_node)
{
        struct otx2_nix_tm_shaper_profile *profile;
        uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
        uint64_t regval[MAX_REGS_PER_MBOX_MSG];
        uint64_t reg[MAX_REGS_PER_MBOX_MSG];
        struct otx2_mbox *mbox = dev->mbox;
        uint64_t parent = 0, child = 0;
        uint32_t hw_lvl, rr_prio, schq;
        struct nix_txschq_config *req;
        int rc = -EFAULT;
        uint8_t k = 0;

        memset(regval_mask, 0, sizeof(regval_mask));
        profile = nix_tm_shaper_profile_search(dev,
                                        tm_node->params.shaper_profile_id);
        rr_prio = tm_node->rr_prio;
        hw_lvl = tm_node->hw_lvl;
        schq = tm_node->hw_id;

        /* Root node will not have a parent node */
        if (hw_lvl == dev->otx2_tm_root_lvl)
                parent = tm_node->parent_hw_id;
        else
                parent = tm_node->parent->hw_id;

        /* When the root is TL2, program TL1 defaults here
         * (always ignored for VF).
         */
        if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
            hw_lvl == dev->otx2_tm_root_lvl) {
                rc = populate_tm_tl1_default(dev, parent);
                if (rc)
                        goto error;
        }

        if (hw_lvl != NIX_TXSCH_LVL_SMQ)
                child = find_prio_anchor(dev, tm_node->id);

        /* Override default rr_prio when TL1
         * Static Priority is disabled
         */
        if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
            dev->tm_flags & NIX_TM_TL1_NO_SP) {
                rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
                child = 0;
        }

        otx2_tm_dbg("Topology config node %s(%u)->%s(%"PRIu64") lvl %u, id %u"
                    " prio_anchor %"PRIu64" rr_prio %u (%p)",
                    nix_hwlvl2str(hw_lvl), schq, nix_hwlvl2str(hw_lvl + 1),
                    parent, tm_node->lvl, tm_node->id, child, rr_prio, tm_node);

        /* Prepare Topology and Link config */
        switch (hw_lvl) {
        case NIX_TXSCH_LVL_SMQ:

                /* Set xoff which will be cleared later */
                reg[k] = NIX_AF_SMQX_CFG(schq);
                regval[k] = BIT_ULL(50);
                regval_mask[k] = ~BIT_ULL(50);
                k++;

                /* Parent and schedule conf */
                reg[k] = NIX_AF_MDQX_PARENT(schq);
                regval[k] = parent << 16;
                k++;

                break;
        case NIX_TXSCH_LVL_TL4:
                /* Parent and schedule conf */
                reg[k] = NIX_AF_TL4X_PARENT(schq);
                regval[k] = parent << 16;
                k++;

                reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
                regval[k] = (child << 32) | (rr_prio << 1);
                k++;

                /* Configure TL4 to send to SDP channel instead of CGX/LBK */
                if (otx2_dev_is_sdp(dev)) {
                        reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
                        regval[k] = BIT_ULL(12);
                        k++;
                }
                break;
        case NIX_TXSCH_LVL_TL3:
                /* Parent and schedule conf */
                reg[k] = NIX_AF_TL3X_PARENT(schq);
                regval[k] = parent << 16;
                k++;

                reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
                regval[k] = (child << 32) | (rr_prio << 1);
                k++;

                /* Link configuration */
                if (!otx2_dev_is_sdp(dev) &&
                    dev->link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
                        reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
                                                otx2_nix_get_link(dev));
                        regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
                        k++;
                }

                break;
        case NIX_TXSCH_LVL_TL2:
                /* Parent and schedule conf */
                reg[k] = NIX_AF_TL2X_PARENT(schq);
                regval[k] = parent << 16;
                k++;

                reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
                regval[k] = (child << 32) | (rr_prio << 1);
                k++;

                /* Link configuration */
                if (!otx2_dev_is_sdp(dev) &&
                    dev->link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
                        reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
                                                otx2_nix_get_link(dev));
                        regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
                        k++;
                }

                break;
        case NIX_TXSCH_LVL_TL1:
                reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
                regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
                k++;

                break;
        }

        /* Prepare schedule config */
        k += prepare_tm_sched_reg(dev, tm_node, &reg[k], &regval[k]);

        /* Prepare shaping config */
        k += prepare_tm_shaper_reg(tm_node, profile, &reg[k], &regval[k]);

        if (!k)
                return 0;

        /* Copy and send config mbox */
        req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
        req->lvl = hw_lvl;
        req->num_regs = k;

        otx2_mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
        otx2_mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
        otx2_mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);

        rc = otx2_mbox_process(mbox);
        if (rc)
                goto error;

        return 0;
error:
        otx2_err("Txschq cfg request failed for node %p, rc=%d", tm_node, rc);
        return rc;
}

static int
nix_tm_txsch_reg_config(struct otx2_eth_dev *dev)
{
        struct otx2_nix_tm_node *tm_node;
        uint32_t hw_lvl;
        int rc = 0;

        for (hw_lvl = 0; hw_lvl <= dev->otx2_tm_root_lvl; hw_lvl++) {
                TAILQ_FOREACH(tm_node, &dev->node_list, node) {
                        if (tm_node->hw_lvl == hw_lvl &&
                            tm_node->hw_lvl != NIX_TXSCH_LVL_CNT) {
                                rc = populate_tm_reg(dev, tm_node);
                                if (rc)
                                        goto exit;
                        }
                }
        }
exit:
        return rc;
}

static struct otx2_nix_tm_node *
nix_tm_node_search(struct otx2_eth_dev *dev,
                   uint32_t node_id, bool user)
{
        struct otx2_nix_tm_node *tm_node;

        TAILQ_FOREACH(tm_node, &dev->node_list, node) {
                if (tm_node->id == node_id &&
                    (user == !!(tm_node->flags & NIX_TM_NODE_USER)))
                        return tm_node;
        }
        return NULL;
}

static uint32_t
check_rr(struct otx2_eth_dev *dev, uint32_t priority, uint32_t parent_id)
{
        struct otx2_nix_tm_node *tm_node;
        uint32_t rr_num = 0;

        TAILQ_FOREACH(tm_node, &dev->node_list, node) {
                if (!tm_node->parent)
                        continue;

                if (tm_node->parent->id != parent_id)
                        continue;

                if (tm_node->priority == priority)
                        rr_num++;
        }
        return rr_num;
}

static int
nix_tm_update_parent_info(struct otx2_eth_dev *dev)
{
        struct otx2_nix_tm_node *tm_node_child;
        struct otx2_nix_tm_node *tm_node;
        struct otx2_nix_tm_node *parent;
        uint32_t rr_num = 0;
        uint32_t priority;

        TAILQ_FOREACH(tm_node, &dev->node_list, node) {
                if (!tm_node->parent)
                        continue;
                /* Count the group of children with the same priority,
                 * i.e. the round-robin group.
                 */
                parent = tm_node->parent;
                priority = tm_node->priority;
                rr_num = check_rr(dev, priority, parent->id);

                /* Assuming that multiple RR groups are
                 * not configured based on capability.
                 */
                if (rr_num > 1) {
                        parent->rr_prio = priority;
                        parent->rr_num = rr_num;
                }

                /* Find out static priority children that are not in RR */
                TAILQ_FOREACH(tm_node_child, &dev->node_list, node) {
                        if (!tm_node_child->parent)
                                continue;
                        if (parent->id != tm_node_child->parent->id)
                                continue;
                        if (parent->max_prio == UINT32_MAX &&
                            tm_node_child->priority != parent->rr_prio)
                                parent->max_prio = 0;

                        if (parent->max_prio < tm_node_child->priority &&
                            parent->rr_prio != tm_node_child->priority)
                                parent->max_prio = tm_node_child->priority;
                }
        }

        return 0;
}
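
/*
 * Worked example for the bookkeeping above: children with priorities
 * {0, 1, 1, 2} under one parent yield rr_prio = 1 and rr_num = 2 (the two
 * priority-1 children form the RR group), while max_prio settles at 2,
 * the highest static priority outside the RR group.
 */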

static int
nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,
                        uint32_t parent_node_id, uint32_t priority,
                        uint32_t weight, uint16_t hw_lvl,
                        uint16_t lvl, bool user,
                        struct rte_tm_node_params *params)
{
        struct otx2_nix_tm_shaper_profile *profile;
        struct otx2_nix_tm_node *tm_node, *parent_node;
        struct shaper_params cir, pir;
        uint32_t profile_id;

        profile_id = params->shaper_profile_id;
        profile = nix_tm_shaper_profile_search(dev, profile_id);

        parent_node = nix_tm_node_search(dev, parent_node_id, user);

        tm_node = rte_zmalloc("otx2_nix_tm_node",
                              sizeof(struct otx2_nix_tm_node), 0);
        if (!tm_node)
                return -ENOMEM;

        tm_node->lvl = lvl;
        tm_node->hw_lvl = hw_lvl;

        /* Maintain minimum weight */
        if (!weight)
                weight = 1;

        tm_node->id = node_id;
        tm_node->priority = priority;
        tm_node->weight = weight;
        tm_node->rr_prio = 0xf;
        tm_node->max_prio = UINT32_MAX;
        tm_node->hw_id = UINT32_MAX;
        tm_node->flags = 0;
        if (user)
                tm_node->flags = NIX_TM_NODE_USER;
        rte_memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));

        if (profile)
                profile->reference_count++;

        memset(&cir, 0, sizeof(cir));
        memset(&pir, 0, sizeof(pir));
        shaper_config_to_nix(profile, &cir, &pir);

        tm_node->parent = parent_node;
        tm_node->parent_hw_id = UINT32_MAX;
        /* C0 doesn't support STALL when both PIR & CIR are enabled */
        if (lvl < OTX2_TM_LVL_QUEUE &&
            otx2_dev_is_96xx_Cx(dev) &&
            pir.rate && cir.rate)
                tm_node->red_algo = NIX_REDALG_DISCARD;
        else
                tm_node->red_algo = NIX_REDALG_STD;

        TAILQ_INSERT_TAIL(&dev->node_list, tm_node, node);

        return 0;
}

static int
nix_tm_clear_shaper_profiles(struct otx2_eth_dev *dev)
{
        struct otx2_nix_tm_shaper_profile *shaper_profile;

        while ((shaper_profile = TAILQ_FIRST(&dev->shaper_profile_list))) {
                if (shaper_profile->reference_count)
                        otx2_tm_dbg("Shaper profile %u has non-zero references",
                                    shaper_profile->shaper_profile_id);
                TAILQ_REMOVE(&dev->shaper_profile_list, shaper_profile, shaper);
                rte_free(shaper_profile);
        }

        return 0;
}

static int
nix_clear_path_xoff(struct otx2_eth_dev *dev,
                    struct otx2_nix_tm_node *tm_node)
{
        struct nix_txschq_config *req;
        struct otx2_nix_tm_node *p;
        int rc;

        /* Manipulating SW_XOFF is not supported on Ax */
        if (otx2_dev_is_Ax(dev))
                return 0;

        /* Enable nodes in the path for the flush to succeed */
        if (!nix_tm_is_leaf(dev, tm_node->lvl))
                p = tm_node;
        else
                p = tm_node->parent;
        while (p) {
                if (!(p->flags & NIX_TM_NODE_ENABLED) &&
                    (p->flags & NIX_TM_NODE_HWRES)) {
                        req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
                        req->lvl = p->hw_lvl;
                        req->num_regs = prepare_tm_sw_xoff(p, false, req->reg,
                                                           req->regval);
                        rc = otx2_mbox_process(dev->mbox);
                        if (rc)
                                return rc;

                        p->flags |= NIX_TM_NODE_ENABLED;
                }
                p = p->parent;
        }

        return 0;
}

static int
nix_smq_xoff(struct otx2_eth_dev *dev,
             struct otx2_nix_tm_node *tm_node,
             bool enable)
{
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_txschq_config *req;
        uint16_t smq;
        int rc;

        smq = tm_node->hw_id;
        otx2_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
                    enable ? "enable" : "disable");

        rc = nix_clear_path_xoff(dev, tm_node);
        if (rc)
                return rc;

        req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
        req->lvl = NIX_TXSCH_LVL_SMQ;
        req->num_regs = 1;

        req->reg[0] = NIX_AF_SMQX_CFG(smq);
        req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
        req->regval_mask[0] = enable ?
                                ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);

        return otx2_mbox_process(mbox);
}
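
/*
 * Note on the NIX_AF_SMQX_CFG bits as used in this file (a sketch inferred
 * from the call sites, not from the HRM): bit 50 acts as the SW XOFF (it is
 * also set at init time in populate_tm_reg() and cleared here on disable),
 * and bit 49, set only on the enable path, triggers the SMQ flush.
 */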

int
otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable)
{
        struct otx2_eth_txq *txq = __txq;
        struct npa_aq_enq_req *req;
        struct npa_aq_enq_rsp *rsp;
        struct otx2_npa_lf *lf;
        struct otx2_mbox *mbox;
        uint64_t aura_handle;
        int rc;

        otx2_tm_dbg("Setting SQ %u SQB aura FC to %s", txq->sq,
                    enable ? "enable" : "disable");

        lf = otx2_npa_lf_obj_get();
        if (!lf)
                return -EFAULT;
        mbox = lf->mbox;
        /* Set/clear sqb aura fc_ena */
        aura_handle = txq->sqb_pool->pool_id;
        req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);

        req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
        req->ctype = NPA_AQ_CTYPE_AURA;
        req->op = NPA_AQ_INSTOP_WRITE;
        /* Below is not needed for aura writes but AF driver needs it */
        /* AF will translate to associated poolctx */
        req->aura.pool_addr = req->aura_id;

        req->aura.fc_ena = enable;
        req->aura_mask.fc_ena = 1;

        rc = otx2_mbox_process(mbox);
        if (rc)
                return rc;

        /* Read back npa aura ctx */
        req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);

        req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
        req->ctype = NPA_AQ_CTYPE_AURA;
        req->op = NPA_AQ_INSTOP_READ;

        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
        if (rc)
                return rc;

        /* Init fc_mem when enabling, as there might be no HW triggers yet */
        if (enable)
                *(volatile uint64_t *)txq->fc_mem = rsp->aura.count;
        else
                *(volatile uint64_t *)txq->fc_mem = txq->nb_sqb_bufs;
        /* Sync write barrier */
        rte_wmb();

        return 0;
}

static int
nix_txq_flush_sq_spin(struct otx2_eth_txq *txq)
{
        uint16_t sqb_cnt, head_off, tail_off;
        struct otx2_eth_dev *dev = txq->dev;
        uint64_t wdata, val, prev;
        uint16_t sq = txq->sq;
        int64_t *regaddr;
        uint64_t timeout; /* tens of usec */

        /* Wait for enough time based on shaper min rate */
        timeout = (txq->qconf.nb_desc * NIX_MAX_HW_FRS * 8 * 1E5);
        timeout = timeout / dev->tm_rate_min;
        if (!timeout)
                timeout = 10000;

        wdata = ((uint64_t)sq << 32);
        regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS);
        val = otx2_atomic64_add_nosync(wdata, regaddr);

        /* Spin for multiple iterations: "txq->fc_cache_pkts" may still
         * have room to send packets even though fc_mem is disabled.
         */
        while (true) {
                prev = val;
                rte_delay_us(10);
                val = otx2_atomic64_add_nosync(wdata, regaddr);
                /* Continue on error */
                if (val & BIT_ULL(63))
                        continue;

                if (prev != val)
                        continue;

                sqb_cnt = val & 0xFFFF;
                head_off = (val >> 20) & 0x3F;
                tail_off = (val >> 28) & 0x3F;

                /* SQ reached quiescent state */
                if (sqb_cnt <= 1 && head_off == tail_off &&
                    (*txq->fc_mem == txq->nb_sqb_bufs)) {
                        break;
                }

                /* Timeout */
                if (!timeout)
                        goto exit;
                timeout--;
        }

        return 0;
exit:
        otx2_nix_tm_dump(dev);
        return -EFAULT;
}
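
/*
 * Decode of the NIX_LF_SQ_OP_STATUS value polled above, as used here:
 * writing (sq << 32) selects the SQ, bit 63 flags an op error, bits [15:0]
 * hold sqb_cnt, and bits [25:20] / [33:28] the head/tail offsets. The SQ is
 * deemed quiescent once at most one SQB remains, head equals tail, and
 * fc_mem shows all nb_sqb_bufs returned.
 */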

/* Flush and disable tx queue and its parent SMQ */
int otx2_nix_sq_flush_pre(void *_txq, bool dev_started)
{
        struct otx2_nix_tm_node *tm_node, *sibling;
        struct otx2_eth_txq *txq;
        struct otx2_eth_dev *dev;
        uint16_t sq;
        bool user;
        int rc;

        txq = _txq;
        dev = txq->dev;
        sq = txq->sq;

        user = !!(dev->tm_flags & NIX_TM_COMMITTED);

        /* Find the node for this SQ */
        tm_node = nix_tm_node_search(dev, sq, user);
        if (!tm_node || !(tm_node->flags & NIX_TM_NODE_ENABLED)) {
                otx2_err("Invalid node/state for sq %u", sq);
                return -EFAULT;
        }

        /* Enable CGX RXTX to drain pkts */
        if (!dev_started) {
                /* Though it enables both RX MCAM entries and the CGX link,
                 * we assume all the RX queues were stopped long before.
                 */
                otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
                rc = otx2_mbox_process(dev->mbox);
                if (rc) {
                        otx2_err("cgx start failed, rc=%d", rc);
                        return rc;
                }
        }

        /* Disable smq xoff in case it was enabled earlier */
        rc = nix_smq_xoff(dev, tm_node->parent, false);
        if (rc) {
                otx2_err("Failed to enable smq %u, rc=%d",
                         tm_node->parent->hw_id, rc);
                return rc;
        }

        /* As per HRM, to disable an SQ, all other SQs
         * that feed the same SMQ must be paused before the SMQ flush.
         */
        TAILQ_FOREACH(sibling, &dev->node_list, node) {
                if (sibling->parent != tm_node->parent)
                        continue;
                if (!(sibling->flags & NIX_TM_NODE_ENABLED))
                        continue;

                sq = sibling->id;
                txq = dev->eth_dev->data->tx_queues[sq];
                if (!txq)
                        continue;

                rc = otx2_nix_sq_sqb_aura_fc(txq, false);
                if (rc) {
                        otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
                        goto cleanup;
                }

                /* Wait for sq entries to be flushed */
                rc = nix_txq_flush_sq_spin(txq);
                if (rc) {
                        otx2_err("Failed to drain sq %u, rc=%d", txq->sq, rc);
                        goto cleanup;
                }
        }

        tm_node->flags &= ~NIX_TM_NODE_ENABLED;

        /* Disable and flush */
        rc = nix_smq_xoff(dev, tm_node->parent, true);
        if (rc) {
                otx2_err("Failed to disable smq %u, rc=%d",
                         tm_node->parent->hw_id, rc);
                goto cleanup;
        }
cleanup:
        /* Restore cgx state */
        if (!dev_started) {
                otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
                rc |= otx2_mbox_process(dev->mbox);
        }

        return rc;
}
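
/*
 * Sketch of the flush contract implied above: otx2_nix_sq_flush_pre()
 * pauses every sibling SQ feeding the SMQ, drains them, then XOFFs and
 * flushes the SMQ; otx2_nix_sq_flush_post() below undoes this by re-enabling
 * the SMQ (once, if any sibling remains) and the siblings' SQB aura FC.
 */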

int otx2_nix_sq_flush_post(void *_txq)
{
        struct otx2_nix_tm_node *tm_node, *sibling;
        struct otx2_eth_txq *txq = _txq;
        struct otx2_eth_txq *s_txq;
        struct otx2_eth_dev *dev;
        bool once = false;
        uint16_t sq, s_sq;
        bool user;
        int rc;

        dev = txq->dev;
        sq = txq->sq;
        user = !!(dev->tm_flags & NIX_TM_COMMITTED);

        /* Find the node for this SQ */
        tm_node = nix_tm_node_search(dev, sq, user);
        if (!tm_node) {
                otx2_err("Invalid node for sq %u", sq);
                return -EFAULT;
        }

        /* Enable all the siblings back */
        TAILQ_FOREACH(sibling, &dev->node_list, node) {
                if (sibling->parent != tm_node->parent)
                        continue;

                if (sibling->id == sq)
                        continue;

                if (!(sibling->flags & NIX_TM_NODE_ENABLED))
                        continue;

                s_sq = sibling->id;
                s_txq = dev->eth_dev->data->tx_queues[s_sq];
                if (!s_txq)
                        continue;

                if (!once) {
                        /* Enable back if any SQ is still present */
                        rc = nix_smq_xoff(dev, tm_node->parent, false);
                        if (rc) {
                                otx2_err("Failed to enable smq %u, rc=%d",
                                         tm_node->parent->hw_id, rc);
                                return rc;
                        }
                        once = true;
                }

                rc = otx2_nix_sq_sqb_aura_fc(s_txq, true);
                if (rc) {
                        otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
                        return rc;
                }
        }

        return 0;
}

static int
nix_sq_sched_data(struct otx2_eth_dev *dev,
                  struct otx2_nix_tm_node *tm_node,
                  bool rr_quantum_only)
{
        struct rte_eth_dev *eth_dev = dev->eth_dev;
        struct otx2_mbox *mbox = dev->mbox;
        uint16_t sq = tm_node->id, smq;
        struct nix_aq_enq_req *req;
        uint64_t rr_quantum;
        int rc;

        smq = tm_node->parent->hw_id;
        rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);

        if (rr_quantum_only)
                otx2_tm_dbg("Update sq(%u) rr_quantum 0x%"PRIx64,
                            sq, rr_quantum);
        else
                otx2_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%"PRIx64,
                            sq, smq, rr_quantum);

        if (sq >= eth_dev->data->nb_tx_queues)
                return -EFAULT;

        req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
        req->qidx = sq;
        req->ctype = NIX_AQ_CTYPE_SQ;
        req->op = NIX_AQ_INSTOP_WRITE;

        /* Update SMQ only when needed */
        if (!rr_quantum_only) {
                req->sq.smq = smq;
                req->sq_mask.smq = ~req->sq_mask.smq;
        }
        req->sq.smq_rr_quantum = rr_quantum;
        req->sq_mask.smq_rr_quantum = ~req->sq_mask.smq_rr_quantum;

        rc = otx2_mbox_process(mbox);
        if (rc)
                otx2_err("Failed to set smq, rc=%d", rc);
        return rc;
}

int otx2_nix_sq_enable(void *_txq)
{
        struct otx2_eth_txq *txq = _txq;
        int rc;

        /* Enable sqb_aura fc */
        rc = otx2_nix_sq_sqb_aura_fc(txq, true);
        if (rc) {
                otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
                return rc;
        }

        return 0;
}

static int
nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask,
                      uint32_t flags, bool hw_only)
{
        struct otx2_nix_tm_shaper_profile *profile;
        struct otx2_nix_tm_node *tm_node, *next_node;
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_txsch_free_req *req;
        uint32_t profile_id;
        int rc = 0;

        next_node = TAILQ_FIRST(&dev->node_list);
        while (next_node) {
                tm_node = next_node;
                next_node = TAILQ_NEXT(tm_node, node);

                /* Check for only requested nodes */
                if ((tm_node->flags & flags_mask) != flags)
                        continue;

                if (!nix_tm_is_leaf(dev, tm_node->lvl) &&
                    tm_node->hw_lvl != NIX_TXSCH_LVL_TL1 &&
                    tm_node->flags & NIX_TM_NODE_HWRES) {
                        /* Free specific HW resource */
                        otx2_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
                                    nix_hwlvl2str(tm_node->hw_lvl),
                                    tm_node->hw_id, tm_node->lvl,
                                    tm_node->id, tm_node);

                        rc = nix_clear_path_xoff(dev, tm_node);
                        if (rc)
                                return rc;

                        req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
                        req->flags = 0;
                        req->schq_lvl = tm_node->hw_lvl;
                        req->schq = tm_node->hw_id;
                        rc = otx2_mbox_process(mbox);
                        if (rc)
                                return rc;
                        tm_node->flags &= ~NIX_TM_NODE_HWRES;
                }

                /* Leave software elements if needed */
                if (hw_only)
                        continue;

                otx2_tm_dbg("Free node lvl %u id %u (%p)",
                            tm_node->lvl, tm_node->id, tm_node);

                profile_id = tm_node->params.shaper_profile_id;
                profile = nix_tm_shaper_profile_search(dev, profile_id);
                if (profile)
                        profile->reference_count--;

                TAILQ_REMOVE(&dev->node_list, tm_node, node);
                rte_free(tm_node);
        }

        if (!flags_mask) {
                /* Free all hw resources */
                req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
                req->flags = TXSCHQ_FREE_ALL;

                return otx2_mbox_process(mbox);
        }

        return rc;
}

static uint8_t
nix_tm_copy_rsp_to_dev(struct otx2_eth_dev *dev,
                       struct nix_txsch_alloc_rsp *rsp)
{
        uint16_t schq;
        uint8_t lvl;

        for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
                for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) {
                        dev->txschq_list[lvl][schq] = rsp->schq_list[lvl][schq];
                        dev->txschq_contig_list[lvl][schq] =
                                rsp->schq_contig_list[lvl][schq];
                }

                dev->txschq[lvl] = rsp->schq[lvl];
                dev->txschq_contig[lvl] = rsp->schq_contig[lvl];
        }
        return 0;
}

static int
nix_tm_assign_id_to_node(struct otx2_eth_dev *dev,
                         struct otx2_nix_tm_node *child,
                         struct otx2_nix_tm_node *parent)
{
        uint32_t hw_id, schq_con_index, prio_offset;
        uint32_t l_id, schq_index;

        otx2_tm_dbg("Assign hw id for child node %s lvl %u id %u (%p)",
                    nix_hwlvl2str(child->hw_lvl), child->lvl, child->id, child);

        child->flags |= NIX_TM_NODE_HWRES;

        /* Process root nodes */
        if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
            child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
                int idx = 0;
                uint32_t tschq_con_index;

                l_id = child->hw_lvl;
                tschq_con_index = dev->txschq_contig_index[l_id];
                hw_id = dev->txschq_contig_list[l_id][tschq_con_index];
                child->hw_id = hw_id;
                dev->txschq_contig_index[l_id]++;
                /* Update TL1 hw_id for its parent for config purposes */
                idx = dev->txschq_index[NIX_TXSCH_LVL_TL1]++;
                hw_id = dev->txschq_list[NIX_TXSCH_LVL_TL1][idx];
                child->parent_hw_id = hw_id;
                return 0;
        }
        if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL1 &&
            child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
                uint32_t tschq_con_index;

                l_id = child->hw_lvl;
                tschq_con_index = dev->txschq_index[l_id];
                hw_id = dev->txschq_list[l_id][tschq_con_index];
                child->hw_id = hw_id;
                dev->txschq_index[l_id]++;
                return 0;
        }

        /* Process children with parents */
        l_id = child->hw_lvl;
        schq_index = dev->txschq_index[l_id];
        schq_con_index = dev->txschq_contig_index[l_id];

        if (child->priority == parent->rr_prio) {
                hw_id = dev->txschq_list[l_id][schq_index];
                child->hw_id = hw_id;
                child->parent_hw_id = parent->hw_id;
                dev->txschq_index[l_id]++;
        } else {
                prio_offset = schq_con_index + child->priority;
                hw_id = dev->txschq_contig_list[l_id][prio_offset];
                child->hw_id = hw_id;
        }
        return 0;
}
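
/*
 * Worked example of the id assignment above: a parent with children at
 * priorities {0, 1, 2, 2, 3} and rr_prio = 2 hands the priority-0/1/3
 * children consecutive entries of txschq_contig_list[] indexed by their
 * priority, while the two RR children draw from txschq_list[]. This is why
 * find_prio_anchor() can recover the block base as hw_id - priority.
 */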
1354
1355 static int
1356 nix_tm_assign_hw_id(struct otx2_eth_dev *dev)
1357 {
1358         struct otx2_nix_tm_node *parent, *child;
1359         uint32_t child_hw_lvl, con_index_inc, i;
1360
1361         for (i = NIX_TXSCH_LVL_TL1; i > 0; i--) {
1362                 TAILQ_FOREACH(parent, &dev->node_list, node) {
1363                         child_hw_lvl = parent->hw_lvl - 1;
1364                         if (parent->hw_lvl != i)
1365                                 continue;
1366                         TAILQ_FOREACH(child, &dev->node_list, node) {
1367                                 if (!child->parent)
1368                                         continue;
1369                                 if (child->parent->id != parent->id)
1370                                         continue;
1371                                 nix_tm_assign_id_to_node(dev, child, parent);
1372                         }
1373
1374                         con_index_inc = parent->max_prio + 1;
1375                         dev->txschq_contig_index[child_hw_lvl] += con_index_inc;
1376
1377                         /*
1378                          * Explicitly assign id to parent node if it
1379                          * doesn't have a parent
1380                          */
1381                         if (parent->hw_lvl == dev->otx2_tm_root_lvl)
1382                                 nix_tm_assign_id_to_node(dev, parent, NULL);
1383                 }
1384         }
1385         return 0;
1386 }
1387
1388 static uint8_t
1389 nix_tm_count_req_schq(struct otx2_eth_dev *dev,
1390                       struct nix_txsch_alloc_req *req, uint8_t lvl)
1391 {
1392         struct otx2_nix_tm_node *tm_node;
1393         uint8_t contig_count;
1394
1395         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1396                 if (lvl == tm_node->hw_lvl) {
1397                         req->schq[lvl - 1] += tm_node->rr_num;
1398                         if (tm_node->max_prio != UINT32_MAX) {
1399                                 contig_count = tm_node->max_prio + 1;
1400                                 req->schq_contig[lvl - 1] += contig_count;
1401                         }
1402                 }
1403                 if (lvl == dev->otx2_tm_root_lvl &&
1404                     dev->otx2_tm_root_lvl && lvl == NIX_TXSCH_LVL_TL2 &&
1405                     tm_node->hw_lvl == dev->otx2_tm_root_lvl) {
1406                         req->schq_contig[dev->otx2_tm_root_lvl]++;
1407                 }
1408         }
1409
1410         req->schq[NIX_TXSCH_LVL_TL1] = 1;
1411         req->schq_contig[NIX_TXSCH_LVL_TL1] = 0;
1412
1413         return 0;
1414 }
1415
1416 static int
1417 nix_tm_prepare_txschq_req(struct otx2_eth_dev *dev,
1418                           struct nix_txsch_alloc_req *req)
1419 {
1420         uint8_t i;
1421
1422         for (i = NIX_TXSCH_LVL_TL1; i > 0; i--)
1423                 nix_tm_count_req_schq(dev, req, i);
1424
1425         for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
1426                 dev->txschq_index[i] = 0;
1427                 dev->txschq_contig_index[i] = 0;
1428         }
1429         return 0;
1430 }
1431
1432 static int
1433 nix_tm_send_txsch_alloc_msg(struct otx2_eth_dev *dev)
1434 {
1435         struct otx2_mbox *mbox = dev->mbox;
1436         struct nix_txsch_alloc_req *req;
1437         struct nix_txsch_alloc_rsp *rsp;
1438         int rc;
1439
1440         req = otx2_mbox_alloc_msg_nix_txsch_alloc(mbox);
1441
1442         rc = nix_tm_prepare_txschq_req(dev, req);
1443         if (rc)
1444                 return rc;
1445
1446         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1447         if (rc)
1448                 return rc;
1449
1450         nix_tm_copy_rsp_to_dev(dev, rsp);
1451         dev->link_cfg_lvl = rsp->link_cfg_lvl;
1452
1453         nix_tm_assign_hw_id(dev);
1454         return 0;
1455 }
1456
1457 static int
1458 nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)
1459 {
1460         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1461         struct otx2_nix_tm_node *tm_node;
1462         struct otx2_eth_txq *txq;
1463         uint16_t sq;
1464         int rc;
1465
1466         nix_tm_update_parent_info(dev);
1467
1468         rc = nix_tm_send_txsch_alloc_msg(dev);
1469         if (rc) {
1470                 otx2_err("TM failed to alloc tm resources=%d", rc);
1471                 return rc;
1472         }
1473
1474         rc = nix_tm_txsch_reg_config(dev);
1475         if (rc) {
1476                 otx2_err("TM failed to configure sched registers=%d", rc);
1477                 return rc;
1478         }
1479
1480         /* Trigger MTU recalculate as SMQ needs MTU conf */
1481         if (eth_dev->data->dev_started && eth_dev->data->nb_rx_queues) {
1482                 rc = otx2_nix_recalc_mtu(eth_dev);
1483                 if (rc) {
1484                         otx2_err("TM MTU update failed, rc=%d", rc);
1485                         return rc;
1486                 }
1487         }
1488
1489         /* Mark all non-leaf's as enabled */
1490         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1491                 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1492                         tm_node->flags |= NIX_TM_NODE_ENABLED;
1493         }
1494
1495         if (!xmit_enable)
1496                 return 0;
1497
1498         /* Update SQ Sched Data while SQ is idle */
1499         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1500                 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1501                         continue;
1502
1503                 rc = nix_sq_sched_data(dev, tm_node, false);
1504                 if (rc) {
1505                         otx2_err("SQ %u sched update failed, rc=%d",
1506                                  tm_node->id, rc);
1507                         return rc;
1508                 }
1509         }
1510
1511         /* Finally XON all SMQ's */
1512         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1513                 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1514                         continue;
1515
1516                 rc = nix_smq_xoff(dev, tm_node, false);
1517                 if (rc) {
1518                         otx2_err("Failed to enable smq %u, rc=%d",
1519                                  tm_node->hw_id, rc);
1520                         return rc;
1521                 }
1522         }
1523
1524         /* Enable xmit as all the topology is ready */
1525         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1526                 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1527                         continue;
1528
1529                 sq = tm_node->id;
1530                 txq = eth_dev->data->tx_queues[sq];
1531
1532                 rc = otx2_nix_sq_enable(txq);
1533                 if (rc) {
1534                         otx2_err("TM sw xon failed on SQ %u, rc=%d",
1535                                  tm_node->id, rc);
1536                         return rc;
1537                 }
1538                 tm_node->flags |= NIX_TM_NODE_ENABLED;
1539         }
1540
1541         return 0;
1542 }
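
/*
 * Bring-up order used above, in short: allocate HW scheduler queues via
 * mbox -> program scheduler registers -> recalculate MTU for SMQ ->
 * update SQ scheduling data while the SQs are idle -> XON all SMQs ->
 * enable the SQs for transmit.
 */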
1543
1544 static int
1545 send_tm_reqval(struct otx2_mbox *mbox,
1546                struct nix_txschq_config *req,
1547                struct rte_tm_error *error)
1548 {
1549         int rc;
1550
1551         if (!req->num_regs ||
1552             req->num_regs > MAX_REGS_PER_MBOX_MSG) {
1553                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1554                 error->message = "invalid config";
1555                 return -EIO;
1556         }
1557
1558         rc = otx2_mbox_process(mbox);
1559         if (rc) {
1560                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1561                 error->message = "unexpected fatal error";
1562         }
1563         return rc;
1564 }
1565
1566 static uint16_t
1567 nix_tm_lvl2nix(struct otx2_eth_dev *dev, uint32_t lvl)
1568 {
1569         if (nix_tm_have_tl1_access(dev)) {
1570                 switch (lvl) {
1571                 case OTX2_TM_LVL_ROOT:
1572                         return NIX_TXSCH_LVL_TL1;
1573                 case OTX2_TM_LVL_SCH1:
1574                         return NIX_TXSCH_LVL_TL2;
1575                 case OTX2_TM_LVL_SCH2:
1576                         return NIX_TXSCH_LVL_TL3;
1577                 case OTX2_TM_LVL_SCH3:
1578                         return NIX_TXSCH_LVL_TL4;
1579                 case OTX2_TM_LVL_SCH4:
1580                         return NIX_TXSCH_LVL_SMQ;
1581                 default:
1582                         return NIX_TXSCH_LVL_CNT;
1583                 }
1584         } else {
1585                 switch (lvl) {
1586                 case OTX2_TM_LVL_ROOT:
1587                         return NIX_TXSCH_LVL_TL2;
1588                 case OTX2_TM_LVL_SCH1:
1589                         return NIX_TXSCH_LVL_TL3;
1590                 case OTX2_TM_LVL_SCH2:
1591                         return NIX_TXSCH_LVL_TL4;
1592                 case OTX2_TM_LVL_SCH3:
1593                         return NIX_TXSCH_LVL_SMQ;
1594                 default:
1595                         return NIX_TXSCH_LVL_CNT;
1596                 }
1597         }
1598 }
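
/*
 * Resulting level mapping, for reference:
 *
 *	rte_tm level	with TL1 access		without TL1 access
 *	ROOT		TL1			TL2
 *	SCH1		TL2			TL3
 *	SCH2		TL3			TL4
 *	SCH3		TL4			SMQ
 *	SCH4		SMQ			leaf (SQ)
 *	QUEUE		leaf (SQ)		-
 */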
1599
1600 static uint16_t
1601 nix_max_prio(struct otx2_eth_dev *dev, uint16_t hw_lvl)
1602 {
1603         if (hw_lvl >= NIX_TXSCH_LVL_CNT)
1604                 return 0;
1605
1606         /* MDQ doesn't support SP */
1607         if (hw_lvl == NIX_TXSCH_LVL_MDQ)
1608                 return 0;
1609
1610         /* PF's TL1 with VF's enabled doesn't support SP */
1611         if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
1612             (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
1613              (dev->tm_flags & NIX_TM_TL1_NO_SP)))
1614                 return 0;
1615
1616         return TXSCH_TLX_SP_PRIO_MAX - 1;
1617 }
1618
1619
1620 static int
1621 validate_prio(struct otx2_eth_dev *dev, uint32_t lvl,
1622               uint32_t parent_id, uint32_t priority,
1623               struct rte_tm_error *error)
1624 {
1625         uint8_t priorities[TXSCH_TLX_SP_PRIO_MAX];
1626         struct otx2_nix_tm_node *tm_node;
1627         uint32_t rr_num = 0;
1628         int i;
1629
1630         /* Validate priority against max */
1631         if (priority > nix_max_prio(dev, nix_tm_lvl2nix(dev, lvl - 1))) {
1632                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
1633                 error->message = "unsupported priority value";
1634                 return -EINVAL;
1635         }
1636
1637         if (parent_id == RTE_TM_NODE_ID_NULL)
1638                 return 0;
1639
1640         memset(priorities, 0, TXSCH_TLX_SP_PRIO_MAX);
1641         priorities[priority] = 1;
1642
1643         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1644                 if (!tm_node->parent)
1645                         continue;
1646
1647                 if (!(tm_node->flags & NIX_TM_NODE_USER))
1648                         continue;
1649
1650                 if (tm_node->parent->id != parent_id)
1651                         continue;
1652
1653                 priorities[tm_node->priority]++;
1654         }
1655
1656         for (i = 0; i < TXSCH_TLX_SP_PRIO_MAX; i++)
1657                 if (priorities[i] > 1)
1658                         rr_num++;
1659
        /* At most, one RR group per parent */
1661         if (rr_num > 1) {
1662                 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
1663                 error->message = "multiple DWRR node priority";
1664                 return -EINVAL;
1665         }
1666
1667         /* Check for previous priority to avoid holes in priorities */
1668         if (priority && !priorities[priority - 1]) {
1669                 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
1670                 error->message = "priority not in order";
1671                 return -EINVAL;
1672         }
1673
1674         return 0;
1675 }
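
/*
 * Example (illustrative): for a parent with five children, priorities
 * {0, 1, 1, 1, 2} are accepted (a single DWRR group at priority 1),
 * while {0, 0, 1, 1} is rejected (two DWRR groups) and {0, 2} is
 * rejected (hole at priority 1).
 */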
1676
1677 static int
1678 read_tm_reg(struct otx2_mbox *mbox, uint64_t reg,
1679             uint64_t *regval, uint32_t hw_lvl)
1680 {
1681         volatile struct nix_txschq_config *req;
1682         struct nix_txschq_config *rsp;
1683         int rc;
1684
1685         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
1686         req->read = 1;
1687         req->lvl = hw_lvl;
1688         req->reg[0] = reg;
1689         req->num_regs = 1;
1690
1691         rc = otx2_mbox_process_msg(mbox, (void **)&rsp);
1692         if (rc)
1693                 return rc;
1694         *regval = rsp->regval[0];
1695         return 0;
1696 }
1697
1698 /* Search for min rate in topology */
1699 static void
1700 nix_tm_shaper_profile_update_min(struct otx2_eth_dev *dev)
1701 {
1702         struct otx2_nix_tm_shaper_profile *profile;
1703         uint64_t rate_min = 1E9; /* 1 Gbps */
1704
1705         TAILQ_FOREACH(profile, &dev->shaper_profile_list, shaper) {
1706                 if (profile->params.peak.rate &&
1707                     profile->params.peak.rate < rate_min)
1708                         rate_min = profile->params.peak.rate;
1709
1710                 if (profile->params.committed.rate &&
1711                     profile->params.committed.rate < rate_min)
1712                         rate_min = profile->params.committed.rate;
1713         }
1714
1715         dev->tm_rate_min = rate_min;
1716 }
1717
1718 static int
1719 nix_xmit_disable(struct rte_eth_dev *eth_dev)
1720 {
1721         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1722         uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
1723         uint16_t sqb_cnt, head_off, tail_off;
1724         struct otx2_nix_tm_node *tm_node;
1725         struct otx2_eth_txq *txq;
1726         uint64_t wdata, val;
        int i, rc = 0;
1728
1729         otx2_tm_dbg("Disabling xmit on %s", eth_dev->data->name);
1730
1731         /* Enable CGX RXTX to drain pkts */
1732         if (!eth_dev->data->dev_started) {
1733                 otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
1734                 rc = otx2_mbox_process(dev->mbox);
1735                 if (rc)
1736                         return rc;
1737         }
1738
1739         /* XON all SMQ's */
1740         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1741                 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1742                         continue;
1743                 if (!(tm_node->flags & NIX_TM_NODE_HWRES))
1744                         continue;
1745
1746                 rc = nix_smq_xoff(dev, tm_node, false);
1747                 if (rc) {
1748                         otx2_err("Failed to enable smq %u, rc=%d",
1749                                  tm_node->hw_id, rc);
1750                         goto cleanup;
1751                 }
1752         }
1753
1754         /* Flush all tx queues */
1755         for (i = 0; i < sq_cnt; i++) {
1756                 txq = eth_dev->data->tx_queues[i];
1757
1758                 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
1759                 if (rc) {
1760                         otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
1761                         goto cleanup;
1762                 }
1763
1764                 /* Wait for sq entries to be flushed */
1765                 rc = nix_txq_flush_sq_spin(txq);
1766                 if (rc) {
1767                         otx2_err("Failed to drain sq, rc=%d\n", rc);
1768                         goto cleanup;
1769                 }
1770         }
1771
1772         /* XOFF & Flush all SMQ's. HRM mandates
1773          * all SQ's empty before SMQ flush is issued.
1774          */
1775         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1776                 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1777                         continue;
1778                 if (!(tm_node->flags & NIX_TM_NODE_HWRES))
1779                         continue;
1780
1781                 rc = nix_smq_xoff(dev, tm_node, true);
1782                 if (rc) {
1783                         otx2_err("Failed to enable smq %u, rc=%d",
1784                                  tm_node->hw_id, rc);
1785                         goto cleanup;
1786                 }
1787         }
1788
1789         /* Verify sanity of all tx queues */
1790         for (i = 0; i < sq_cnt; i++) {
1791                 txq = eth_dev->data->tx_queues[i];
1792
1793                 wdata = ((uint64_t)txq->sq << 32);
1794                 val = otx2_atomic64_add_nosync(wdata,
1795                                (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS));
1796
1797                 sqb_cnt = val & 0xFFFF;
1798                 head_off = (val >> 20) & 0x3F;
1799                 tail_off = (val >> 28) & 0x3F;
1800
1801                 if (sqb_cnt > 1 || head_off != tail_off ||
1802                     (*txq->fc_mem != txq->nb_sqb_bufs))
1803                         otx2_err("Failed to gracefully flush sq %u", txq->sq);
1804         }
1805
1806 cleanup:
        /* Restore CGX state */
1808         if (!eth_dev->data->dev_started) {
1809                 otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
1810                 rc |= otx2_mbox_process(dev->mbox);
1811         }
1812
1813         return rc;
1814 }
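
/*
 * Flush sequence used above, in short: XON all SMQs so queued packets
 * can drain -> disable SQB aura flow control and spin until every SQ is
 * empty -> XOFF and flush the SMQs (the HRM requires empty SQs first)
 * -> verify each SQ via NIX_LF_SQ_OP_STATUS.
 */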
1815
1816 static int
1817 otx2_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
1818                           int *is_leaf, struct rte_tm_error *error)
1819 {
1820         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1821         struct otx2_nix_tm_node *tm_node;
1822
1823         if (is_leaf == NULL) {
1824                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1825                 return -EINVAL;
1826         }
1827
1828         tm_node = nix_tm_node_search(dev, node_id, true);
1829         if (node_id == RTE_TM_NODE_ID_NULL || !tm_node) {
1830                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
1831                 return -EINVAL;
1832         }
1833         if (nix_tm_is_leaf(dev, tm_node->lvl))
1834                 *is_leaf = true;
1835         else
1836                 *is_leaf = false;
1837         return 0;
1838 }
1839
1840 static int
1841 otx2_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
1842                      struct rte_tm_capabilities *cap,
1843                      struct rte_tm_error *error)
1844 {
1845         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1846         struct otx2_mbox *mbox = dev->mbox;
1847         int rc, max_nr_nodes = 0, i;
1848         struct free_rsrcs_rsp *rsp;
1849
1850         memset(cap, 0, sizeof(*cap));
1851
1852         otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
1853         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1854         if (rc) {
1855                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1856                 error->message = "unexpected fatal error";
1857                 return rc;
1858         }
1859
1860         for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
1861                 max_nr_nodes += rsp->schq[i];
1862
1863         cap->n_nodes_max = max_nr_nodes + dev->tm_leaf_cnt;
1864         /* TL1 level is reserved for PF */
1865         cap->n_levels_max = nix_tm_have_tl1_access(dev) ?
1866                                 OTX2_TM_LVL_MAX : OTX2_TM_LVL_MAX - 1;
1867         cap->non_leaf_nodes_identical = 1;
1868         cap->leaf_nodes_identical = 1;
1869
1870         /* Shaper Capabilities */
1871         cap->shaper_private_n_max = max_nr_nodes;
1872         cap->shaper_n_max = max_nr_nodes;
1873         cap->shaper_private_dual_rate_n_max = max_nr_nodes;
1874         cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1875         cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1876         cap->shaper_pkt_length_adjust_min = 0;
1877         cap->shaper_pkt_length_adjust_max = 0;
1878
1879         /* Schedule Capabilities */
1880         cap->sched_n_children_max = rsp->schq[NIX_TXSCH_LVL_MDQ];
1881         cap->sched_sp_n_priorities_max = TXSCH_TLX_SP_PRIO_MAX;
1882         cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
1883         cap->sched_wfq_n_groups_max = 1;
1884         cap->sched_wfq_weight_max = MAX_SCHED_WEIGHT;
1885
1886         cap->dynamic_update_mask =
1887                 RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
1888                 RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
1889         cap->stats_mask =
1890                 RTE_TM_STATS_N_PKTS |
1891                 RTE_TM_STATS_N_BYTES |
1892                 RTE_TM_STATS_N_PKTS_RED_DROPPED |
1893                 RTE_TM_STATS_N_BYTES_RED_DROPPED;
1894
1895         for (i = 0; i < RTE_COLORS; i++) {
1896                 cap->mark_vlan_dei_supported[i] = false;
1897                 cap->mark_ip_ecn_tcp_supported[i] = false;
1898                 cap->mark_ip_dscp_supported[i] = false;
1899         }
1900
1901         return 0;
1902 }
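
/*
 * Example (illustrative): applications reach the above through the
 * generic rte_tm API, e.g.:
 *
 *	struct rte_tm_capabilities cap;
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_capabilities_get(port_id, &cap, &err) == 0)
 *		printf("TM nodes max: %u\n", cap.n_nodes_max);
 */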
1903
1904 static int
1905 otx2_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
                           struct rte_tm_level_capabilities *cap,
                           struct rte_tm_error *error)
1908 {
1909         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1910         struct otx2_mbox *mbox = dev->mbox;
1911         struct free_rsrcs_rsp *rsp;
1912         uint16_t hw_lvl;
1913         int rc;
1914
1915         memset(cap, 0, sizeof(*cap));
1916
1917         otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
1918         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1919         if (rc) {
1920                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1921                 error->message = "unexpected fatal error";
1922                 return rc;
1923         }
1924
1925         hw_lvl = nix_tm_lvl2nix(dev, lvl);
1926
1927         if (nix_tm_is_leaf(dev, lvl)) {
1928                 /* Leaf */
1929                 cap->n_nodes_max = dev->tm_leaf_cnt;
1930                 cap->n_nodes_leaf_max = dev->tm_leaf_cnt;
1931                 cap->leaf_nodes_identical = 1;
1932                 cap->leaf.stats_mask =
1933                         RTE_TM_STATS_N_PKTS |
1934                         RTE_TM_STATS_N_BYTES;
1935
1936         } else if (lvl == OTX2_TM_LVL_ROOT) {
1937                 /* Root node, aka TL2(vf)/TL1(pf) */
1938                 cap->n_nodes_max = 1;
1939                 cap->n_nodes_nonleaf_max = 1;
1940                 cap->non_leaf_nodes_identical = 1;
1941
1942                 cap->nonleaf.shaper_private_supported = true;
1943                 cap->nonleaf.shaper_private_dual_rate_supported =
1944                         nix_tm_have_tl1_access(dev) ? false : true;
1945                 cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1946                 cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1947
1948                 cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
1949                 cap->nonleaf.sched_sp_n_priorities_max =
1950                                         nix_max_prio(dev, hw_lvl) + 1;
1951                 cap->nonleaf.sched_wfq_n_groups_max = 1;
1952                 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
1953
1954                 if (nix_tm_have_tl1_access(dev))
1955                         cap->nonleaf.stats_mask =
1956                                 RTE_TM_STATS_N_PKTS_RED_DROPPED |
1957                                 RTE_TM_STATS_N_BYTES_RED_DROPPED;
1958         } else if ((lvl < OTX2_TM_LVL_MAX) &&
1959                    (hw_lvl < NIX_TXSCH_LVL_CNT)) {
1960                 /* TL2, TL3, TL4, MDQ */
1961                 cap->n_nodes_max = rsp->schq[hw_lvl];
1962                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
1963                 cap->non_leaf_nodes_identical = 1;
1964
1965                 cap->nonleaf.shaper_private_supported = true;
1966                 cap->nonleaf.shaper_private_dual_rate_supported = true;
1967                 cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1968                 cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1969
1970                 /* MDQ doesn't support Strict Priority */
1971                 if (hw_lvl == NIX_TXSCH_LVL_MDQ)
1972                         cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
1973                 else
1974                         cap->nonleaf.sched_n_children_max =
1975                                 rsp->schq[hw_lvl - 1];
1976                 cap->nonleaf.sched_sp_n_priorities_max =
1977                         nix_max_prio(dev, hw_lvl) + 1;
1978                 cap->nonleaf.sched_wfq_n_groups_max = 1;
1979                 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
1980         } else {
1981                 /* unsupported level */
1982                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                return -EINVAL;
1984         }
1985         return 0;
1986 }
1987
1988 static int
1989 otx2_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
1990                           struct rte_tm_node_capabilities *cap,
1991                           struct rte_tm_error *error)
1992 {
1993         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1994         struct otx2_mbox *mbox = dev->mbox;
1995         struct otx2_nix_tm_node *tm_node;
1996         struct free_rsrcs_rsp *rsp;
1997         int rc, hw_lvl, lvl;
1998
1999         memset(cap, 0, sizeof(*cap));
2000
2001         tm_node = nix_tm_node_search(dev, node_id, true);
2002         if (!tm_node) {
2003                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2004                 error->message = "no such node";
2005                 return -EINVAL;
2006         }
2007
2008         hw_lvl = tm_node->hw_lvl;
2009         lvl = tm_node->lvl;
2010
2011         /* Leaf node */
2012         if (nix_tm_is_leaf(dev, lvl)) {
2013                 cap->stats_mask = RTE_TM_STATS_N_PKTS |
2014                                         RTE_TM_STATS_N_BYTES;
2015                 return 0;
2016         }
2017
2018         otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
2019         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
2020         if (rc) {
2021                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2022                 error->message = "unexpected fatal error";
2023                 return rc;
2024         }
2025
2026         /* Non Leaf Shaper */
2027         cap->shaper_private_supported = true;
2028         cap->shaper_private_dual_rate_supported =
2029                 (hw_lvl == NIX_TXSCH_LVL_TL1) ? false : true;
2030         cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
2031         cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
2032
2033         /* Non Leaf Scheduler */
2034         if (hw_lvl == NIX_TXSCH_LVL_MDQ)
2035                 cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
2036         else
2037                 cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
2038
2039         cap->nonleaf.sched_sp_n_priorities_max = nix_max_prio(dev, hw_lvl) + 1;
2040         cap->nonleaf.sched_wfq_n_children_per_group_max =
2041                 cap->nonleaf.sched_n_children_max;
2042         cap->nonleaf.sched_wfq_n_groups_max = 1;
2043         cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
2044
2045         if (hw_lvl == NIX_TXSCH_LVL_TL1)
2046                 cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
2047                         RTE_TM_STATS_N_BYTES_RED_DROPPED;
2048         return 0;
2049 }
2050
2051 static int
2052 otx2_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev,
2053                                uint32_t profile_id,
2054                                struct rte_tm_shaper_params *params,
2055                                struct rte_tm_error *error)
2056 {
2057         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2058         struct otx2_nix_tm_shaper_profile *profile;
2059
2060         profile = nix_tm_shaper_profile_search(dev, profile_id);
2061         if (profile) {
2062                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2063                 error->message = "shaper profile ID exist";
2064                 return -EINVAL;
2065         }
2066
2067         /* Committed rate and burst size can be enabled/disabled */
2068         if (params->committed.size || params->committed.rate) {
2069                 if (params->committed.size < MIN_SHAPER_BURST ||
2070                     params->committed.size > MAX_SHAPER_BURST) {
2071                         error->type =
2072                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
2073                         return -EINVAL;
2074                 } else if (!shaper_rate_to_nix(params->committed.rate * 8,
2075                                                NULL, NULL, NULL)) {
2076                         error->type =
2077                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
2078                         error->message = "shaper committed rate invalid";
2079                         return -EINVAL;
2080                 }
2081         }
2082
2083         /* Peak rate and burst size can be enabled/disabled */
2084         if (params->peak.size || params->peak.rate) {
2085                 if (params->peak.size < MIN_SHAPER_BURST ||
2086                     params->peak.size > MAX_SHAPER_BURST) {
2087                         error->type =
2088                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
2089                         return -EINVAL;
2090                 } else if (!shaper_rate_to_nix(params->peak.rate * 8,
2091                                                NULL, NULL, NULL)) {
2092                         error->type =
                                RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
2094                         error->message = "shaper peak rate invalid";
2095                         return -EINVAL;
2096                 }
2097         }
2098
2099         profile = rte_zmalloc("otx2_nix_tm_shaper_profile",
2100                               sizeof(struct otx2_nix_tm_shaper_profile), 0);
2101         if (!profile)
2102                 return -ENOMEM;
2103
2104         profile->shaper_profile_id = profile_id;
2105         rte_memcpy(&profile->params, params,
2106                    sizeof(struct rte_tm_shaper_params));
2107         TAILQ_INSERT_TAIL(&dev->shaper_profile_list, profile, shaper);
2108
2109         otx2_tm_dbg("Added TM shaper profile %u, "
2110                     " pir %" PRIu64 " , pbs %" PRIu64 ", cir %" PRIu64
2111                     ", cbs %" PRIu64 " , adj %u",
2112                     profile_id,
2113                     params->peak.rate * 8,
2114                     params->peak.size,
2115                     params->committed.rate * 8,
2116                     params->committed.size,
2117                     params->pkt_length_adjust);
2118
        /* Translate rates to bits per second */
2120         profile->params.peak.rate = profile->params.peak.rate * 8;
2121         profile->params.committed.rate = profile->params.committed.rate * 8;
2122         /* Always use PIR for single rate shaping */
2123         if (!params->peak.rate && params->committed.rate) {
2124                 profile->params.peak = profile->params.committed;
2125                 memset(&profile->params.committed, 0,
2126                        sizeof(profile->params.committed));
2127         }
2128
2129         /* update min rate */
2130         nix_tm_shaper_profile_update_min(dev);
2131         return 0;
2132 }
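
/*
 * Example (illustrative): rte_tm shaper rates are given in bytes per
 * second (converted to bits per second above). A single-rate 100 Mbps
 * profile could be added as below; the burst size of 4096 is only an
 * assumed value within [MIN_SHAPER_BURST, MAX_SHAPER_BURST]:
 *
 *	struct rte_tm_shaper_params sp = {
 *		.peak = { .rate = 100000000 / 8, .size = 4096 },
 *	};
 *
 *	rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
 */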
2133
2134 static int
2135 otx2_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
2136                                   uint32_t profile_id,
2137                                   struct rte_tm_error *error)
2138 {
2139         struct otx2_nix_tm_shaper_profile *profile;
2140         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2141
2142         profile = nix_tm_shaper_profile_search(dev, profile_id);
2143
2144         if (!profile) {
2145                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2146                 error->message = "shaper profile ID not exist";
2147                 return -EINVAL;
2148         }
2149
2150         if (profile->reference_count) {
2151                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
2152                 error->message = "shaper profile in use";
2153                 return -EINVAL;
2154         }
2155
2156         otx2_tm_dbg("Removing TM shaper profile %u", profile_id);
2157         TAILQ_REMOVE(&dev->shaper_profile_list, profile, shaper);
2158         rte_free(profile);
2159
2160         /* update min rate */
2161         nix_tm_shaper_profile_update_min(dev);
2162         return 0;
2163 }
2164
2165 static int
2166 otx2_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
2167                      uint32_t parent_node_id, uint32_t priority,
2168                      uint32_t weight, uint32_t lvl,
2169                      struct rte_tm_node_params *params,
2170                      struct rte_tm_error *error)
2171 {
2172         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2173         struct otx2_nix_tm_node *parent_node;
2174         int rc, clear_on_fail = 0;
2175         uint32_t exp_next_lvl;
2176         uint16_t hw_lvl;
2177
2178         /* we don't support dynamic updates */
2179         if (dev->tm_flags & NIX_TM_COMMITTED) {
2180                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2181                 error->message = "dynamic update not supported";
2182                 return -EIO;
2183         }
2184
        /* Leaf nodes must have priority 0 */
2186         if (nix_tm_is_leaf(dev, lvl) && priority != 0) {
2187                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2188                 error->message = "queue shapers must be priority 0";
2189                 return -EIO;
2190         }
2191
2192         parent_node = nix_tm_node_search(dev, parent_node_id, true);
2193
2194         /* find the right level */
2195         if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
2196                 if (parent_node_id == RTE_TM_NODE_ID_NULL) {
2197                         lvl = OTX2_TM_LVL_ROOT;
2198                 } else if (parent_node) {
2199                         lvl = parent_node->lvl + 1;
2200                 } else {
                        /* Neither a proper parent nor a proper level id given */
2202                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2203                         error->message = "invalid parent node id";
2204                         return -ERANGE;
2205                 }
2206         }
2207
2208         /* Translate rte_tm level id's to nix hw level id's */
2209         hw_lvl = nix_tm_lvl2nix(dev, lvl);
2210         if (hw_lvl == NIX_TXSCH_LVL_CNT &&
2211             !nix_tm_is_leaf(dev, lvl)) {
2212                 error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
2213                 error->message = "invalid level id";
2214                 return -ERANGE;
2215         }
2216
2217         if (node_id < dev->tm_leaf_cnt)
2218                 exp_next_lvl = NIX_TXSCH_LVL_SMQ;
2219         else
2220                 exp_next_lvl = hw_lvl + 1;
2221
        /* Check that a valid parent exists at the expected level */
2223         if (hw_lvl != dev->otx2_tm_root_lvl &&
2224             (!parent_node || parent_node->hw_lvl != exp_next_lvl)) {
2225                 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2226                 error->message = "invalid parent node id";
2227                 return -EINVAL;
2228         }
2229
2230         /* Check if a node already exists */
2231         if (nix_tm_node_search(dev, node_id, true)) {
2232                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2233                 error->message = "node already exists";
2234                 return -EINVAL;
2235         }
2236
2237         /* Check if shaper profile exists for non leaf node */
2238         if (!nix_tm_is_leaf(dev, lvl) &&
2239             params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE &&
2240             !nix_tm_shaper_profile_search(dev, params->shaper_profile_id)) {
2241                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2242                 error->message = "invalid shaper profile";
2243                 return -EINVAL;
2244         }
2245
        /* Check for a second DWRR group among siblings or for holes in priorities */
2247         if (validate_prio(dev, lvl, parent_node_id, priority, error))
2248                 return -EINVAL;
2249
2250         if (weight > MAX_SCHED_WEIGHT) {
2251                 error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
2252                 error->message = "max weight exceeded";
2253                 return -EINVAL;
2254         }
2255
2256         rc = nix_tm_node_add_to_list(dev, node_id, parent_node_id,
2257                                      priority, weight, hw_lvl,
2258                                      lvl, true, params);
2259         if (rc) {
2260                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2261                 /* cleanup user added nodes */
2262                 if (clear_on_fail)
2263                         nix_tm_free_resources(dev, NIX_TM_NODE_USER,
2264                                               NIX_TM_NODE_USER, false);
2265                 error->message = "failed to add node";
2266                 return rc;
2267         }
2268         error->type = RTE_TM_ERROR_TYPE_NONE;
2269         return 0;
2270 }
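
/*
 * Example (illustrative): nodes are added top-down, root first with
 * RTE_TM_NODE_ID_NULL as parent, then one node per intermediate level,
 * and finally the leaves, whose ids must be below nb_tx_queues.
 * ROOT_ID and SMQ_ID below are placeholder node ids chosen by the
 * caller:
 *
 *	struct rte_tm_node_params np = {
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *	};
 *
 *	rte_tm_node_add(port_id, ROOT_ID, RTE_TM_NODE_ID_NULL, 0, 1,
 *			OTX2_TM_LVL_ROOT, &np, &err);
 *	... intermediate SCH level nodes ...
 *	rte_tm_node_add(port_id, 0, SMQ_ID, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 */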
2271
2272 static int
2273 otx2_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id,
2274                         struct rte_tm_error *error)
2275 {
2276         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2277         struct otx2_nix_tm_node *tm_node, *child_node;
2278         struct otx2_nix_tm_shaper_profile *profile;
2279         uint32_t profile_id;
2280
2281         /* we don't support dynamic updates yet */
2282         if (dev->tm_flags & NIX_TM_COMMITTED) {
2283                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2284                 error->message = "hierarchy exists";
2285                 return -EIO;
2286         }
2287
2288         if (node_id == RTE_TM_NODE_ID_NULL) {
2289                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2290                 error->message = "invalid node id";
2291                 return -EINVAL;
2292         }
2293
2294         tm_node = nix_tm_node_search(dev, node_id, true);
2295         if (!tm_node) {
2296                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2297                 error->message = "no such node";
2298                 return -EINVAL;
2299         }
2300
2301         /* Check for any existing children */
2302         TAILQ_FOREACH(child_node, &dev->node_list, node) {
2303                 if (child_node->parent == tm_node) {
2304                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2305                         error->message = "children exist";
2306                         return -EINVAL;
2307                 }
2308         }
2309
        /* Remove shaper profile reference, if any */
        profile_id = tm_node->params.shaper_profile_id;
        profile = nix_tm_shaper_profile_search(dev, profile_id);
        if (profile)
                profile->reference_count--;
2314
2315         TAILQ_REMOVE(&dev->node_list, tm_node, node);
2316         rte_free(tm_node);
2317         return 0;
2318 }
2319
2320 static int
2321 nix_tm_node_suspend_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
2322                            struct rte_tm_error *error, bool suspend)
2323 {
2324         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2325         struct otx2_mbox *mbox = dev->mbox;
2326         struct otx2_nix_tm_node *tm_node;
2327         struct nix_txschq_config *req;
2328         uint16_t flags;
2329         int rc;
2330
2331         tm_node = nix_tm_node_search(dev, node_id, true);
2332         if (!tm_node) {
2333                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2334                 error->message = "no such node";
2335                 return -EINVAL;
2336         }
2337
2338         if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
2339                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2340                 error->message = "hierarchy doesn't exist";
2341                 return -EINVAL;
2342         }
2343
2344         flags = tm_node->flags;
2345         flags = suspend ? (flags & ~NIX_TM_NODE_ENABLED) :
2346                 (flags | NIX_TM_NODE_ENABLED);
2347
2348         if (tm_node->flags == flags)
2349                 return 0;
2350
2351         /* send mbox for state change */
2352         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2353
2354         req->lvl = tm_node->hw_lvl;
2355         req->num_regs = prepare_tm_sw_xoff(tm_node, suspend,
2356                                            req->reg, req->regval);
2357         rc = send_tm_reqval(mbox, req, error);
2358         if (!rc)
2359                 tm_node->flags = flags;
2360         return rc;
2361 }
2362
2363 static int
2364 otx2_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id,
2365                          struct rte_tm_error *error)
2366 {
2367         return nix_tm_node_suspend_resume(eth_dev, node_id, error, true);
2368 }
2369
2370 static int
2371 otx2_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
2372                         struct rte_tm_error *error)
2373 {
2374         return nix_tm_node_suspend_resume(eth_dev, node_id, error, false);
2375 }
2376
2377 static int
2378 otx2_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
2379                              int clear_on_fail,
2380                              struct rte_tm_error *error)
2381 {
2382         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2383         struct otx2_nix_tm_node *tm_node;
2384         uint32_t leaf_cnt = 0;
2385         int rc;
2386
2387         if (dev->tm_flags & NIX_TM_COMMITTED) {
2388                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2389                 error->message = "hierarchy exists";
2390                 return -EINVAL;
2391         }
2392
2393         /* Check if we have all the leaf nodes */
2394         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
2395                 if (tm_node->flags & NIX_TM_NODE_USER &&
2396                     tm_node->id < dev->tm_leaf_cnt)
2397                         leaf_cnt++;
2398         }
2399
2400         if (leaf_cnt != dev->tm_leaf_cnt) {
2401                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2402                 error->message = "incomplete hierarchy";
2403                 return -EINVAL;
2404         }
2405
        /*
         * Disable xmit; it will be re-enabled
         * once the new topology is in place.
         */
2410         rc = nix_xmit_disable(eth_dev);
2411         if (rc) {
2412                 otx2_err("failed to disable TX, rc=%d", rc);
2413                 return -EIO;
2414         }
2415
2416         /* Delete default/ratelimit tree */
2417         if (dev->tm_flags & (NIX_TM_DEFAULT_TREE | NIX_TM_RATE_LIMIT_TREE)) {
2418                 rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false);
2419                 if (rc) {
2420                         error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2421                         error->message = "failed to free default resources";
2422                         return rc;
2423                 }
2424                 dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE |
2425                                    NIX_TM_RATE_LIMIT_TREE);
2426         }
2427
2428         /* Free up user alloc'ed resources */
2429         rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER,
2430                                    NIX_TM_NODE_USER, true);
2431         if (rc) {
2432                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2433                 error->message = "failed to free user resources";
2434                 return rc;
2435         }
2436
2437         rc = nix_tm_alloc_resources(eth_dev, true);
2438         if (rc) {
2439                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2440                 error->message = "alloc resources failed";
2441                 /* TODO should we restore default config ? */
2442                 if (clear_on_fail)
2443                         nix_tm_free_resources(dev, 0, 0, false);
2444                 return rc;
2445         }
2446
2447         error->type = RTE_TM_ERROR_TYPE_NONE;
2448         dev->tm_flags |= NIX_TM_COMMITTED;
2449         return 0;
2450 }
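
/*
 * Example (illustrative): once all nodes are in place, the application
 * commits the topology; with clear_on_fail set, a failed commit also
 * drops the user-created nodes:
 *
 *	rte_tm_hierarchy_commit(port_id, 1, &err);
 */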
2451
2452 static int
2453 otx2_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev,
2454                                uint32_t node_id,
2455                                uint32_t profile_id,
2456                                struct rte_tm_error *error)
2457 {
2458         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2459         struct otx2_nix_tm_shaper_profile *profile = NULL;
2460         struct otx2_mbox *mbox = dev->mbox;
2461         struct otx2_nix_tm_node *tm_node;
2462         struct nix_txschq_config *req;
2463         uint8_t k;
2464         int rc;
2465
2466         tm_node = nix_tm_node_search(dev, node_id, true);
2467         if (!tm_node || nix_tm_is_leaf(dev, tm_node->lvl)) {
2468                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2469                 error->message = "invalid node";
2470                 return -EINVAL;
2471         }
2472
2473         if (profile_id == tm_node->params.shaper_profile_id)
2474                 return 0;
2475
2476         if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
2477                 profile = nix_tm_shaper_profile_search(dev, profile_id);
2478                 if (!profile) {
2479                         error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2480                         error->message = "shaper profile ID not exist";
2481                         return -EINVAL;
2482                 }
2483         }
2484
2485         tm_node->params.shaper_profile_id = profile_id;
2486
2487         /* Nothing to do if not yet committed */
2488         if (!(dev->tm_flags & NIX_TM_COMMITTED))
2489                 return 0;
2490
2491         tm_node->flags &= ~NIX_TM_NODE_ENABLED;
2492
2493         /* Flush the specific node with SW_XOFF */
2494         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2495         req->lvl = tm_node->hw_lvl;
2496         k = prepare_tm_sw_xoff(tm_node, true, req->reg, req->regval);
2497         req->num_regs = k;
2498
2499         rc = send_tm_reqval(mbox, req, error);
2500         if (rc)
2501                 return rc;
2502
2503         /* Update the PIR/CIR and clear SW XOFF */
2504         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2505         req->lvl = tm_node->hw_lvl;
2506
2507         k = prepare_tm_shaper_reg(tm_node, profile, req->reg, req->regval);
2508
2509         k += prepare_tm_sw_xoff(tm_node, false, &req->reg[k], &req->regval[k]);
2510
2511         req->num_regs = k;
2512         rc = send_tm_reqval(mbox, req, error);
2513         if (!rc)
2514                 tm_node->flags |= NIX_TM_NODE_ENABLED;
2515         return rc;
2516 }
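
/*
 * Example (illustrative): a committed node can be re-shaped at runtime;
 * the node is briefly SW-XOFFed while PIR/CIR are rewritten:
 *
 *	rte_tm_node_shaper_update(port_id, node_id, profile_id, &err);
 */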
2517
2518 static int
2519 otx2_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev,
2520                                uint32_t node_id, uint32_t new_parent_id,
2521                                uint32_t priority, uint32_t weight,
2522                                struct rte_tm_error *error)
2523 {
2524         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2525         struct otx2_nix_tm_node *tm_node, *sibling;
2526         struct otx2_nix_tm_node *new_parent;
2527         struct nix_txschq_config *req;
2528         uint8_t k;
2529         int rc;
2530
2531         if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
2532                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2533                 error->message = "hierarchy doesn't exist";
2534                 return -EINVAL;
2535         }
2536
2537         tm_node = nix_tm_node_search(dev, node_id, true);
2538         if (!tm_node) {
2539                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2540                 error->message = "no such node";
2541                 return -EINVAL;
2542         }
2543
2544         /* Parent id valid only for non root nodes */
2545         if (tm_node->hw_lvl != dev->otx2_tm_root_lvl) {
2546                 new_parent = nix_tm_node_search(dev, new_parent_id, true);
2547                 if (!new_parent) {
2548                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2549                         error->message = "no such parent node";
2550                         return -EINVAL;
2551                 }
2552
2553                 /* Current support is only for dynamic weight update */
2554                 if (tm_node->parent != new_parent ||
2555                     tm_node->priority != priority) {
2556                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2557                         error->message = "only weight update supported";
2558                         return -EINVAL;
2559                 }
2560         }
2561
2562         /* Skip if no change */
2563         if (tm_node->weight == weight)
2564                 return 0;
2565
2566         tm_node->weight = weight;
2567
2568         /* For leaf nodes, SQ CTX needs update */
2569         if (nix_tm_is_leaf(dev, tm_node->lvl)) {
2570                 /* Update SQ quantum data on the fly */
2571                 rc = nix_sq_sched_data(dev, tm_node, true);
2572                 if (rc) {
2573                         error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2574                         error->message = "sq sched data update failed";
2575                         return rc;
2576                 }
2577         } else {
2578                 /* XOFF Parent node */
2579                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2580                 req->lvl = tm_node->parent->hw_lvl;
2581                 req->num_regs = prepare_tm_sw_xoff(tm_node->parent, true,
2582                                                    req->reg, req->regval);
2583                 rc = send_tm_reqval(dev->mbox, req, error);
2584                 if (rc)
2585                         return rc;
2586
2587                 /* XOFF this node and all other siblings */
2588                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2589                 req->lvl = tm_node->hw_lvl;
2590
2591                 k = 0;
2592                 TAILQ_FOREACH(sibling, &dev->node_list, node) {
2593                         if (sibling->parent != tm_node->parent)
2594                                 continue;
2595                         k += prepare_tm_sw_xoff(sibling, true, &req->reg[k],
2596                                                 &req->regval[k]);
2597                 }
2598                 req->num_regs = k;
2599                 rc = send_tm_reqval(dev->mbox, req, error);
2600                 if (rc)
2601                         return rc;
2602
2603                 /* Update new weight for current node */
2604                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2605                 req->lvl = tm_node->hw_lvl;
2606                 req->num_regs = prepare_tm_sched_reg(dev, tm_node,
2607                                                      req->reg, req->regval);
2608                 rc = send_tm_reqval(dev->mbox, req, error);
2609                 if (rc)
2610                         return rc;
2611
2612                 /* XON this node and all other siblings */
2613                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2614                 req->lvl = tm_node->hw_lvl;
2615
2616                 k = 0;
2617                 TAILQ_FOREACH(sibling, &dev->node_list, node) {
2618                         if (sibling->parent != tm_node->parent)
2619                                 continue;
2620                         k += prepare_tm_sw_xoff(sibling, false, &req->reg[k],
2621                                                 &req->regval[k]);
2622                 }
2623                 req->num_regs = k;
2624                 rc = send_tm_reqval(dev->mbox, req, error);
2625                 if (rc)
2626                         return rc;
2627
2628                 /* XON Parent node */
2629                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2630                 req->lvl = tm_node->parent->hw_lvl;
2631                 req->num_regs = prepare_tm_sw_xoff(tm_node->parent, false,
2632                                                    req->reg, req->regval);
2633                 rc = send_tm_reqval(dev->mbox, req, error);
2634                 if (rc)
2635                         return rc;
2636         }
2637         return 0;
2638 }
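
/*
 * Example (illustrative): only the weight may change here; the current
 * parent id and priority must be passed unchanged or the call is
 * rejected:
 *
 *	rte_tm_node_parent_update(port_id, node_id, cur_parent_id,
 *				  cur_prio, new_weight, &err);
 */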
2639
2640 static int
2641 otx2_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
2642                             struct rte_tm_node_stats *stats,
2643                             uint64_t *stats_mask, int clear,
2644                             struct rte_tm_error *error)
2645 {
2646         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2647         struct otx2_nix_tm_node *tm_node;
2648         uint64_t reg, val;
2649         int64_t *addr;
2650         int rc = 0;
2651
2652         tm_node = nix_tm_node_search(dev, node_id, true);
2653         if (!tm_node) {
2654                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2655                 error->message = "no such node";
2656                 return -EINVAL;
2657         }
2658
2659         /* Stats support only for leaf node or TL1 root */
2660         if (nix_tm_is_leaf(dev, tm_node->lvl)) {
2661                 reg = (((uint64_t)tm_node->id) << 32);
2662
2663                 /* Packets */
2664                 addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);
2665                 val = otx2_atomic64_add_nosync(reg, addr);
2666                 if (val & OP_ERR)
2667                         val = 0;
2668                 stats->n_pkts = val - tm_node->last_pkts;
2669
2670                 /* Bytes */
2671                 addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_OCTS);
2672                 val = otx2_atomic64_add_nosync(reg, addr);
2673                 if (val & OP_ERR)
2674                         val = 0;
2675                 stats->n_bytes = val - tm_node->last_bytes;
2676
2677                 if (clear) {
2678                         tm_node->last_pkts = stats->n_pkts;
2679                         tm_node->last_bytes = stats->n_bytes;
2680                 }
2681
2682                 *stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
2683
2684         } else if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1) {
2685                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2686                 error->message = "stats read error";
2687
2688                 /* RED Drop packets */
2689                 reg = NIX_AF_TL1X_DROPPED_PACKETS(tm_node->hw_id);
2690                 rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
2691                 if (rc)
2692                         goto exit;
2693                 stats->leaf.n_pkts_dropped[RTE_COLOR_RED] =
2694                                                 val - tm_node->last_pkts;
2695
2696                 /* RED Drop bytes */
2697                 reg = NIX_AF_TL1X_DROPPED_BYTES(tm_node->hw_id);
2698                 rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
2699                 if (rc)
2700                         goto exit;
2701                 stats->leaf.n_bytes_dropped[RTE_COLOR_RED] =
2702                                                 val - tm_node->last_bytes;
2703
2704                 /* Clear stats */
2705                 if (clear) {
2706                         tm_node->last_pkts =
2707                                 stats->leaf.n_pkts_dropped[RTE_COLOR_RED];
2708                         tm_node->last_bytes =
2709                                 stats->leaf.n_bytes_dropped[RTE_COLOR_RED];
2710                 }
2711
2712                 *stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
2713                         RTE_TM_STATS_N_BYTES_RED_DROPPED;
2714
2715         } else {
2716                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2717                 error->message = "unsupported node";
2718                 rc = -EINVAL;
2719         }
2720
2721 exit:
2722         return rc;
2723 }
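
/*
 * Example (illustrative): read and clear the leaf (SQ) counters:
 *
 *	struct rte_tm_node_stats st;
 *	uint64_t mask;
 *
 *	rte_tm_node_stats_read(port_id, sq_id, &st, &mask, 1, &err);
 */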
2724
2725 const struct rte_tm_ops otx2_tm_ops = {
2726         .node_type_get = otx2_nix_tm_node_type_get,
2727
2728         .capabilities_get = otx2_nix_tm_capa_get,
2729         .level_capabilities_get = otx2_nix_tm_level_capa_get,
2730         .node_capabilities_get = otx2_nix_tm_node_capa_get,
2731
2732         .shaper_profile_add = otx2_nix_tm_shaper_profile_add,
2733         .shaper_profile_delete = otx2_nix_tm_shaper_profile_delete,
2734
2735         .node_add = otx2_nix_tm_node_add,
2736         .node_delete = otx2_nix_tm_node_delete,
2737         .node_suspend = otx2_nix_tm_node_suspend,
2738         .node_resume = otx2_nix_tm_node_resume,
2739         .hierarchy_commit = otx2_nix_tm_hierarchy_commit,
2740
2741         .node_shaper_update = otx2_nix_tm_node_shaper_update,
2742         .node_parent_update = otx2_nix_tm_node_parent_update,
2743         .node_stats_read = otx2_nix_tm_node_stats_read,
2744 };
2745
2746 static int
2747 nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)
2748 {
2749         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2750         uint32_t def = eth_dev->data->nb_tx_queues;
2751         struct rte_tm_node_params params;
2752         uint32_t leaf_parent, i;
2753         int rc = 0, leaf_level;
2754
2755         /* Default params */
2756         memset(&params, 0, sizeof(params));
2757         params.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
2758
2759         if (nix_tm_have_tl1_access(dev)) {
2760                 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
2761                 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2762                                              DEFAULT_RR_WEIGHT,
2763                                              NIX_TXSCH_LVL_TL1,
2764                                              OTX2_TM_LVL_ROOT, false, &params);
2765                 if (rc)
2766                         goto exit;
2767                 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2768                                              DEFAULT_RR_WEIGHT,
2769                                              NIX_TXSCH_LVL_TL2,
2770                                              OTX2_TM_LVL_SCH1, false, &params);
2771                 if (rc)
2772                         goto exit;
2773
2774                 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2775                                              DEFAULT_RR_WEIGHT,
2776                                              NIX_TXSCH_LVL_TL3,
2777                                              OTX2_TM_LVL_SCH2, false, &params);
2778                 if (rc)
2779                         goto exit;
2780
2781                 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2782                                              DEFAULT_RR_WEIGHT,
2783                                              NIX_TXSCH_LVL_TL4,
2784                                              OTX2_TM_LVL_SCH3, false, &params);
2785                 if (rc)
2786                         goto exit;
2787
2788                 rc = nix_tm_node_add_to_list(dev, def + 4, def + 3, 0,
2789                                              DEFAULT_RR_WEIGHT,
2790                                              NIX_TXSCH_LVL_SMQ,
2791                                              OTX2_TM_LVL_SCH4, false, &params);
2792                 if (rc)
2793                         goto exit;
2794
2795                 leaf_parent = def + 4;
2796                 leaf_level = OTX2_TM_LVL_QUEUE;
2797         } else {
2798                 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
2799                 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2800                                              DEFAULT_RR_WEIGHT,
2801                                              NIX_TXSCH_LVL_TL2,
2802                                              OTX2_TM_LVL_ROOT, false, &params);
2803                 if (rc)
2804                         goto exit;
2805
2806                 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2807                                              DEFAULT_RR_WEIGHT,
2808                                              NIX_TXSCH_LVL_TL3,
2809                                              OTX2_TM_LVL_SCH1, false, &params);
2810                 if (rc)
2811                         goto exit;
2812
2813                 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2814                                              DEFAULT_RR_WEIGHT,
2815                                              NIX_TXSCH_LVL_TL4,
2816                                              OTX2_TM_LVL_SCH2, false, &params);
2817                 if (rc)
2818                         goto exit;
2819
2820                 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2821                                              DEFAULT_RR_WEIGHT,
2822                                              NIX_TXSCH_LVL_SMQ,
2823                                              OTX2_TM_LVL_SCH3, false, &params);
2824                 if (rc)
2825                         goto exit;
2826
2827                 leaf_parent = def + 3;
2828                 leaf_level = OTX2_TM_LVL_SCH4;
2829         }
2830
2831         /* Add leaf nodes */
2832         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2833                 rc = nix_tm_node_add_to_list(dev, i, leaf_parent, 0,
2834                                              DEFAULT_RR_WEIGHT,
2835                                              NIX_TXSCH_LVL_CNT,
2836                                              leaf_level, false, &params);
2837                 if (rc)
2838                         break;
2839         }
2840
2841 exit:
2842         return rc;
2843 }
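
/*
 * Default tree shape built above (with TL1 access):
 *
 *	TL1 (root)
 *	 `-- TL2
 *	      `-- TL3
 *	           `-- TL4
 *	                `-- SMQ
 *	                     `-- SQ0 .. SQ(n-1)  (one leaf per Tx queue)
 *
 * Without TL1 access the chain starts at TL2 and the SMQ level parents
 * the leaves directly.
 */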
2844
2845 void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev)
2846 {
2847         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2848
2849         TAILQ_INIT(&dev->node_list);
2850         TAILQ_INIT(&dev->shaper_profile_list);
2851         dev->tm_rate_min = 1E9; /* 1Gbps */
2852 }
2853
2854 int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
2855 {
2856         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2857         struct otx2_eth_dev  *dev = otx2_eth_pmd_priv(eth_dev);
2858         uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
2859         int rc;
2860
2861         /* Free up all resources already held */
2862         rc = nix_tm_free_resources(dev, 0, 0, false);
2863         if (rc) {
2864                 otx2_err("Failed to freeup existing resources,rc=%d", rc);
2865                 return rc;
2866         }
2867
2868         /* Clear shaper profiles */
2869         nix_tm_clear_shaper_profiles(dev);
2870         dev->tm_flags = NIX_TM_DEFAULT_TREE;
2871
        /* Disable TL1 static priority when VFs are enabled,
         * as otherwise the VF's TL2 would need to be reallocated
         * at runtime to support a specific PF topology.
         */
2876         if (pci_dev->max_vfs)
2877                 dev->tm_flags |= NIX_TM_TL1_NO_SP;
2878
2879         rc = nix_tm_prepare_default_tree(eth_dev);
2880         if (rc != 0)
2881                 return rc;
2882
2883         rc = nix_tm_alloc_resources(eth_dev, false);
2884         if (rc != 0)
2885                 return rc;
2886         dev->tm_leaf_cnt = sq_cnt;
2887
2888         return 0;
2889 }
2890
static int
nix_tm_prepare_rate_limited_tree(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint32_t def = eth_dev->data->nb_tx_queues;
	struct rte_tm_node_params params;
	uint32_t leaf_parent, i;
	int rc = 0;

	memset(&params, 0, sizeof(params));

	if (nix_tm_have_tl1_access(dev)) {
		dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
		rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
					DEFAULT_RR_WEIGHT,
					NIX_TXSCH_LVL_TL1,
					OTX2_TM_LVL_ROOT, false, &params);
		if (rc)
			goto error;
		rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
					DEFAULT_RR_WEIGHT,
					NIX_TXSCH_LVL_TL2,
					OTX2_TM_LVL_SCH1, false, &params);
		if (rc)
			goto error;
		rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
					DEFAULT_RR_WEIGHT,
					NIX_TXSCH_LVL_TL3,
					OTX2_TM_LVL_SCH2, false, &params);
		if (rc)
			goto error;
		rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
					DEFAULT_RR_WEIGHT,
					NIX_TXSCH_LVL_TL4,
					OTX2_TM_LVL_SCH3, false, &params);
		if (rc)
			goto error;
		leaf_parent = def + 3;

		/* Add per queue SMQ nodes */
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
			rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
						leaf_parent,
						0, DEFAULT_RR_WEIGHT,
						NIX_TXSCH_LVL_SMQ,
						OTX2_TM_LVL_SCH4,
						false, &params);
			if (rc)
				goto error;
		}

		/* Add leaf nodes */
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
			rc = nix_tm_node_add_to_list(dev, i,
						     leaf_parent + 1 + i, 0,
						     DEFAULT_RR_WEIGHT,
						     NIX_TXSCH_LVL_CNT,
						     OTX2_TM_LVL_QUEUE,
						     false, &params);
			if (rc)
				goto error;
		}

		return 0;
	}

	dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
	rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
				DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL2,
				OTX2_TM_LVL_ROOT, false, &params);
	if (rc)
		goto error;
	rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
				DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL3,
				OTX2_TM_LVL_SCH1, false, &params);
	if (rc)
		goto error;
	rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
				     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL4,
				     OTX2_TM_LVL_SCH2, false, &params);
	if (rc)
		goto error;
	leaf_parent = def + 2;

	/* Add per queue SMQ nodes */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
					     leaf_parent,
					     0, DEFAULT_RR_WEIGHT,
					     NIX_TXSCH_LVL_SMQ,
					     OTX2_TM_LVL_SCH3,
					     false, &params);
		if (rc)
			goto error;
	}

	/* Add leaf nodes */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		rc = nix_tm_node_add_to_list(dev, i, leaf_parent + 1 + i, 0,
					     DEFAULT_RR_WEIGHT,
					     NIX_TXSCH_LVL_CNT,
					     OTX2_TM_LVL_SCH4,
					     false, &params);
		if (rc)
			break;
	}
error:
	return rc;
}

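/*
 * Apply a rate limit on the MDQ node feeding a TX queue with a single
 * mailbox request: rate 0 asserts SW XOFF (queue stopped); a non-zero
 * rate clears SW XOFF if needed and programs only the PIR (peak rate)
 * shaper.
 */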
static int
otx2_nix_tm_rate_limit_mdq(struct rte_eth_dev *eth_dev,
			   struct otx2_nix_tm_node *tm_node,
			   uint64_t tx_rate)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_nix_tm_shaper_profile profile;
	struct otx2_mbox *mbox = dev->mbox;
	volatile uint64_t *reg, *regval;
	struct nix_txschq_config *req;
	uint16_t flags;
	uint8_t k = 0;
	int rc;

	flags = tm_node->flags;

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_MDQ;
	reg = req->reg;
	regval = req->regval;

	/* A rate of zero means "stop the queue": assert SW XOFF */
	if (tx_rate == 0) {
		k += prepare_tm_sw_xoff(tm_node, true, &reg[k], &regval[k]);
		flags &= ~NIX_TM_NODE_ENABLED;
		goto exit;
	}

	/* Re-enable the node if it was previously stopped */
	if (!(flags & NIX_TM_NODE_ENABLED)) {
		k += prepare_tm_sw_xoff(tm_node, false, &reg[k], &regval[k]);
		flags |= NIX_TM_NODE_ENABLED;
	}

	/* Use only PIR for rate limit */
	memset(&profile, 0, sizeof(profile));
	profile.params.peak.rate = tx_rate;
	/* Minimum burst: ~4us worth of bytes at tx_rate, but never less
	 * than one maximum-sized frame.
	 */
	profile.params.peak.size = RTE_MAX(NIX_MAX_HW_FRS,
					   (4ull * tx_rate) / (1E6 * 8));
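	/* Worked example (informative): at tx_rate = 10 Gbps this gives
	 * (4 * 10e9) / (1e6 * 8) = 5000 bytes, i.e. 4us of line rate,
	 * so NIX_MAX_HW_FRS dominates until 4us of traffic exceeds one
	 * maximum-sized frame.
	 */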
	if (!dev->tm_rate_min || dev->tm_rate_min > tx_rate)
		dev->tm_rate_min = tx_rate;

	k += prepare_tm_shaper_reg(tm_node, &profile, &reg[k], &regval[k]);
exit:
	req->num_regs = k;
	rc = otx2_mbox_process(mbox);
	if (rc)
		return rc;

	tm_node->flags = flags;
	return 0;
}

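/*
 * Per-queue TX rate limit (in Mbps). Applications typically reach this
 * callback through the generic ethdev API; a minimal sketch, assuming
 * an already configured port_id and queue_idx:
 *
 *	/- cap the queue at ~100 Mbps -/
 *	rte_eth_set_queue_rate_limit(port_id, queue_idx, 100);
 */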
int
otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
				 uint16_t queue_idx, uint16_t tx_rate_mbps)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
	struct otx2_nix_tm_node *tm_node;
	int rc;

	/* Check for supported revisions */
	if (otx2_dev_is_95xx_Ax(dev) ||
	    otx2_dev_is_96xx_Ax(dev))
		return -EINVAL;

	if (queue_idx >= eth_dev->data->nb_tx_queues)
		return -EINVAL;

	if (!(dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
	    !(dev->tm_flags & NIX_TM_RATE_LIMIT_TREE))
		goto error;

	if ((dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
	    eth_dev->data->nb_tx_queues > 1) {
		/* The ethdev must be stopped before the TM topology
		 * can be changed.
		 */
		if (eth_dev->data->dev_started)
			return -EBUSY;

		/* Disable xmit; it will be re-enabled once the new
		 * topology is committed.
		 */
		rc = nix_xmit_disable(eth_dev);
		if (rc) {
			otx2_err("failed to disable TX, rc=%d", rc);
			return -EIO;
		}

		rc = nix_tm_free_resources(dev, 0, 0, false);
		if (rc < 0) {
			otx2_tm_dbg("failed to free default resources, rc %d",
				    rc);
			return -EIO;
		}

		rc = nix_tm_prepare_rate_limited_tree(eth_dev);
		if (rc < 0) {
			otx2_tm_dbg("failed to prepare tm tree, rc=%d", rc);
			return rc;
		}

		rc = nix_tm_alloc_resources(eth_dev, true);
		if (rc != 0) {
			otx2_tm_dbg("failed to allocate tm tree, rc=%d", rc);
			return rc;
		}

		dev->tm_flags &= ~NIX_TM_DEFAULT_TREE;
		dev->tm_flags |= NIX_TM_RATE_LIMIT_TREE;
	}

	tm_node = nix_tm_node_search(dev, queue_idx, false);

	/* Check if we found a valid leaf node */
	if (!tm_node ||
	    !nix_tm_is_leaf(dev, tm_node->lvl) ||
	    !tm_node->parent ||
	    tm_node->parent->hw_id == UINT32_MAX)
		return -EIO;

	/* The rate is programmed on the leaf's parent MDQ/SMQ node */
	return otx2_nix_tm_rate_limit_mdq(eth_dev, tm_node->parent, tx_rate);
error:
	otx2_tm_dbg("Unsupported TM tree 0x%0x", dev->tm_flags);
	return -EINVAL;
}

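/*
 * Hand back the rte_tm ops table; the generic rte_tm layer dispatches
 * through it. A minimal usage sketch from the application side:
 *
 *	struct rte_tm_capabilities cap;
 *	struct rte_tm_error error;
 *
 *	rte_tm_capabilities_get(port_id, &cap, &error);
 */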
int
otx2_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *arg)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	if (!arg)
		return -EINVAL;

	/* Check for supported revisions */
	if (otx2_dev_is_95xx_Ax(dev) ||
	    otx2_dev_is_96xx_Ax(dev))
		return -EINVAL;

	*(const void **)arg = &otx2_tm_ops;

	return 0;
}

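/*
 * Teardown counterpart of otx2_nix_tm_init_default(): release HW
 * scheduler resources and drop all shaper profiles.
 */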
int
otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	int rc;

	/* Xmit is assumed to be disabled.
	 * Free up resources already held.
	 */
	rc = nix_tm_free_resources(dev, 0, 0, false);
	if (rc) {
		otx2_err("Failed to free up existing resources, rc=%d", rc);
		return rc;
	}

	/* Clear shaper profiles */
	nix_tm_clear_shaper_profiles(dev);

	dev->tm_flags = 0;
	return 0;
}

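/*
 * Fetch the data needed to program an SQ: the HW id of the parent SMQ
 * and the RR quantum derived from the leaf's weight. Also lifts SW
 * XOFF on the parent SMQ so the queue can transmit.
 */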
int
otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
			  uint32_t *rr_quantum, uint16_t *smq)
{
	struct otx2_nix_tm_node *tm_node;
	int rc;

	/* 0..sq_cnt-1 are leaf nodes */
	if (sq >= dev->tm_leaf_cnt)
		return -EINVAL;

	/* Search for internal node first */
	tm_node = nix_tm_node_search(dev, sq, false);
	if (!tm_node)
		tm_node = nix_tm_node_search(dev, sq, true);

	/* Check if we found a valid leaf node */
	if (!tm_node || !nix_tm_is_leaf(dev, tm_node->lvl) ||
	    !tm_node->parent || tm_node->parent->hw_id == UINT32_MAX) {
		return -EIO;
	}

	/* Get SMQ Id of leaf node's parent */
	*smq = tm_node->parent->hw_id;
	*rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);

	rc = nix_smq_xoff(dev, tm_node->parent, false);
	if (rc)
		return rc;
	tm_node->flags |= NIX_TM_NODE_ENABLED;

	return 0;
}