dpdk.git: drivers/net/octeontx2/otx2_tm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2019 Marvell International Ltd.
3  */
4
5 #include <rte_malloc.h>
6
7 #include "otx2_ethdev.h"
8 #include "otx2_tm.h"
9
10 /* Use last LVL_CNT nodes as default nodes */
11 #define NIX_DEFAULT_NODE_ID_START (RTE_TM_NODE_ID_NULL - NIX_TXSCH_LVL_CNT)
12
13 enum otx2_tm_node_level {
14         OTX2_TM_LVL_ROOT = 0,
15         OTX2_TM_LVL_SCH1,
16         OTX2_TM_LVL_SCH2,
17         OTX2_TM_LVL_SCH3,
18         OTX2_TM_LVL_SCH4,
19         OTX2_TM_LVL_QUEUE,
20         OTX2_TM_LVL_MAX,
21 };
22
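/*
 * Pack shaper parameters into the NIX *_CIR/*_PIR register layout:
 * burst exponent at bit 37, burst mantissa at bit 29, rate divider
 * exponent at bit 13, rate exponent at bit 9 and rate mantissa at
 * bit 1. Callers OR in bit 0 to enable the shaper.
 */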
23 static inline
24 uint64_t shaper2regval(struct shaper_params *shaper)
25 {
26         return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
27                 (shaper->div_exp << 13) | (shaper->exponent << 9) |
28                 (shaper->mantissa << 1);
29 }
30
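/*
 * Derive the NIX TX link index from the device's base transmit channel:
 * CGX LMAC channels (>= 0x800) map to 4 * ((chan >> 8) & 0xF) +
 * ((chan >> 4) & 0xF), LBK channels (< 0x700) use link 12, and the
 * remaining (SDP) case uses link 13.
 */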
31 int
32 otx2_nix_get_link(struct otx2_eth_dev *dev)
33 {
34         int link = 13 /* SDP */;
35         uint16_t lmac_chan;
36         uint16_t map;
37
38         lmac_chan = dev->tx_chan_base;
39
40         /* CGX lmac link */
41         if (lmac_chan >= 0x800) {
42                 map = lmac_chan & 0x7FF;
43                 link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
44         } else if (lmac_chan < 0x700) {
45                 /* LBK channel */
46                 link = 12;
47         }
48
49         return link;
50 }
51
52 static uint8_t
53 nix_get_relchan(struct otx2_eth_dev *dev)
54 {
55         return dev->tx_chan_base & 0xff;
56 }
57
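/* TL1 is directly accessible only for PFs on non-Ax silicon, and not for LBK */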
58 static bool
59 nix_tm_have_tl1_access(struct otx2_eth_dev *dev)
60 {
61         bool is_lbk = otx2_dev_is_lbk(dev);
62         return otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) && !is_lbk;
63 }
64
65 static bool
66 nix_tm_is_leaf(struct otx2_eth_dev *dev, int lvl)
67 {
68         if (nix_tm_have_tl1_access(dev))
69                 return (lvl == OTX2_TM_LVL_QUEUE);
70
71         return (lvl == OTX2_TM_LVL_SCH4);
72 }
73
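/*
 * Return the hw_id anchor (hw_id minus priority) of the first
 * static-priority child of node_id, skipping children that belong to
 * the parent's RR group; return 0 when no such child exists.
 */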
74 static int
75 find_prio_anchor(struct otx2_eth_dev *dev, uint32_t node_id)
76 {
77         struct otx2_nix_tm_node *child_node;
78
79         TAILQ_FOREACH(child_node, &dev->node_list, node) {
80                 if (!child_node->parent)
81                         continue;
82                 if (child_node->parent->id != node_id)
83                         continue;
84                 if (child_node->priority == child_node->parent->rr_prio)
85                         continue;
86                 return child_node->hw_id - child_node->priority;
87         }
88         return 0;
89 }
90
91
92 static struct otx2_nix_tm_shaper_profile *
93 nix_tm_shaper_profile_search(struct otx2_eth_dev *dev, uint32_t shaper_id)
94 {
95         struct otx2_nix_tm_shaper_profile *tm_shaper_profile;
96
97         TAILQ_FOREACH(tm_shaper_profile, &dev->shaper_profile_list, shaper) {
98                 if (tm_shaper_profile->shaper_profile_id == shaper_id)
99                         return tm_shaper_profile;
100         }
101         return NULL;
102 }
103
104 static inline uint64_t
105 shaper_rate_to_nix(uint64_t value, uint64_t *exponent_p,
106                    uint64_t *mantissa_p, uint64_t *div_exp_p)
107 {
108         uint64_t div_exp, exponent, mantissa;
109
110         /* Boundary checks */
111         if (value < MIN_SHAPER_RATE ||
112             value > MAX_SHAPER_RATE)
113                 return 0;
114
115         if (value <= SHAPER_RATE(0, 0, 0)) {
116                 /* Calculate rate div_exp and mantissa using
117                  * the following formula:
118                  *
119                  * value = (2E6 * (256 + mantissa)
120                  *              / ((1 << div_exp) * 256))
121                  */
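                /*
                 * Illustrative arithmetic only: div_exp = 1, mantissa = 128
                 * gives 2E6 * (256 + 128) / ((1 << 1) * 256) = 1.5E6.
                 */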
122                 div_exp = 0;
123                 exponent = 0;
124                 mantissa = MAX_RATE_MANTISSA;
125
126                 while (value < (NIX_SHAPER_RATE_CONST / (1 << div_exp)))
127                         div_exp += 1;
128
129                 while (value <
130                        ((NIX_SHAPER_RATE_CONST * (256 + mantissa)) /
131                         ((1 << div_exp) * 256)))
132                         mantissa -= 1;
133         } else {
134                 /* Calculate rate exponent and mantissa using
135                  * the following formula:
136                  *
137                  * value = (2E6 * ((256 + mantissa) << exponent)) / 256
138                  *
139                  */
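                /*
                 * Illustrative arithmetic only: exponent = 1, mantissa = 0
                 * gives 2E6 * ((256 + 0) << 1) / 256 = 4E6.
                 */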
140                 div_exp = 0;
141                 exponent = MAX_RATE_EXPONENT;
142                 mantissa = MAX_RATE_MANTISSA;
143
144                 while (value < (NIX_SHAPER_RATE_CONST * (1 << exponent)))
145                         exponent -= 1;
146
147                 while (value < ((NIX_SHAPER_RATE_CONST *
148                                 ((256 + mantissa) << exponent)) / 256))
149                         mantissa -= 1;
150         }
151
152         if (div_exp > MAX_RATE_DIV_EXP ||
153             exponent > MAX_RATE_EXPONENT || mantissa > MAX_RATE_MANTISSA)
154                 return 0;
155
156         if (div_exp_p)
157                 *div_exp_p = div_exp;
158         if (exponent_p)
159                 *exponent_p = exponent;
160         if (mantissa_p)
161                 *mantissa_p = mantissa;
162
163         /* Calculate real rate value */
164         return SHAPER_RATE(exponent, mantissa, div_exp);
165 }
166
167 static inline uint64_t
168 shaper_burst_to_nix(uint64_t value, uint64_t *exponent_p,
169                     uint64_t *mantissa_p)
170 {
171         uint64_t exponent, mantissa;
172
173         if (value < MIN_SHAPER_BURST || value > MAX_SHAPER_BURST)
174                 return 0;
175
176         /* Calculate burst exponent and mantissa using
177          * the following formula:
178          *
179          * value = ((256 + mantissa) << (exponent + 1)) / 256
180          *
181          *
182          */
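        /*
         * Illustrative arithmetic only: exponent = 0, mantissa = 255
         * gives ((256 + 255) << 1) / 256 = 3 (integer division of 1022 / 256).
         */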
183         exponent = MAX_BURST_EXPONENT;
184         mantissa = MAX_BURST_MANTISSA;
185
186         while (value < (1ull << (exponent + 1)))
187                 exponent -= 1;
188
189         while (value < ((256 + mantissa) << (exponent + 1)) / 256)
190                 mantissa -= 1;
191
192         if (exponent > MAX_BURST_EXPONENT || mantissa > MAX_BURST_MANTISSA)
193                 return 0;
194
195         if (exponent_p)
196                 *exponent_p = exponent;
197         if (mantissa_p)
198                 *mantissa_p = mantissa;
199
200         return SHAPER_BURST(exponent, mantissa);
201 }
202
203 static void
204 shaper_config_to_nix(struct otx2_nix_tm_shaper_profile *profile,
205                      struct shaper_params *cir,
206                      struct shaper_params *pir)
207 {
208         struct rte_tm_shaper_params *param;
209         if (!profile)
210                 return;
211         param = &profile->params;
212
213         /* Calculate CIR exponent and mantissa */
214         if (param->committed.rate)
215                 cir->rate = shaper_rate_to_nix(param->committed.rate,
216                                                &cir->exponent,
217                                                &cir->mantissa,
218                                                &cir->div_exp);
219
220         /* Calculate PIR exponent and mantissa */
221         if (param->peak.rate)
222                 pir->rate = shaper_rate_to_nix(param->peak.rate,
223                                                &pir->exponent,
224                                                &pir->mantissa,
225                                                &pir->div_exp);
226
227         /* Calculate CIR burst exponent and mantissa */
228         if (param->committed.size)
229                 cir->burst = shaper_burst_to_nix(param->committed.size,
230                                                  &cir->burst_exponent,
231                                                  &cir->burst_mantissa);
232
233         /* Calculate PIR burst exponent and mantissa */
234         if (param->peak.size)
235                 pir->burst = shaper_burst_to_nix(param->peak.size,
236                                                  &pir->burst_exponent,
237                                                  &pir->burst_mantissa);
238 }
239
240 static void
241 shaper_default_red_algo(struct otx2_eth_dev *dev,
242                         struct otx2_nix_tm_node *tm_node,
243                         struct otx2_nix_tm_shaper_profile *profile)
244 {
245         struct shaper_params cir, pir;
246
247         /* C0 doesn't support STALL when both PIR & CIR are enabled */
248         if (profile && otx2_dev_is_96xx_Cx(dev)) {
249                 memset(&cir, 0, sizeof(cir));
250                 memset(&pir, 0, sizeof(pir));
251                 shaper_config_to_nix(profile, &cir, &pir);
252
253                 if (pir.rate && cir.rate) {
254                         tm_node->red_algo = NIX_REDALG_DISCARD;
255                         tm_node->flags |= NIX_TM_NODE_RED_DISCARD;
256                         return;
257                 }
258         }
259
260         tm_node->red_algo = NIX_REDALG_STD;
261         tm_node->flags &= ~NIX_TM_NODE_RED_DISCARD;
262 }
263
264 static int
265 populate_tm_tl1_default(struct otx2_eth_dev *dev, uint32_t schq)
266 {
267         struct otx2_mbox *mbox = dev->mbox;
268         struct nix_txschq_config *req;
269
270         /*
271          * Default config for TL1.
272          * For VF this is always ignored.
273          */
274
275         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
276         req->lvl = NIX_TXSCH_LVL_TL1;
277
278         /* Set DWRR quantum */
279         req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
280         req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
281         req->num_regs++;
282
283         req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
284         req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
285         req->num_regs++;
286
287         req->reg[2] = NIX_AF_TL1X_CIR(schq);
288         req->regval[2] = 0;
289         req->num_regs++;
290
291         return otx2_mbox_process(mbox);
292 }
293
294 static uint8_t
295 prepare_tm_sched_reg(struct otx2_eth_dev *dev,
296                      struct otx2_nix_tm_node *tm_node,
297                      volatile uint64_t *reg, volatile uint64_t *regval)
298 {
299         uint64_t strict_prio = tm_node->priority;
300         uint32_t hw_lvl = tm_node->hw_lvl;
301         uint32_t schq = tm_node->hw_id;
302         uint64_t rr_quantum;
303         uint8_t k = 0;
304
305         rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
306
307         /* For children of the root, strict priority is the default if
308          * either the device root is TL2 or TL1 static priority is disabled.
309          */
310         if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
311             (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
312              dev->tm_flags & NIX_TM_TL1_NO_SP))
313                 strict_prio = TXSCH_TL1_DFLT_RR_PRIO;
314
315         otx2_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
316                      "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
317                      nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
318                      tm_node->id, strict_prio, rr_quantum, tm_node);
319
320         switch (hw_lvl) {
321         case NIX_TXSCH_LVL_SMQ:
322                 reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
323                 regval[k] = (strict_prio << 24) | rr_quantum;
324                 k++;
325
326                 break;
327         case NIX_TXSCH_LVL_TL4:
328                 reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
329                 regval[k] = (strict_prio << 24) | rr_quantum;
330                 k++;
331
332                 break;
333         case NIX_TXSCH_LVL_TL3:
334                 reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
335                 regval[k] = (strict_prio << 24) | rr_quantum;
336                 k++;
337
338                 break;
339         case NIX_TXSCH_LVL_TL2:
340                 reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
341                 regval[k] = (strict_prio << 24) | rr_quantum;
342                 k++;
343
344                 break;
345         case NIX_TXSCH_LVL_TL1:
346                 reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
347                 regval[k] = rr_quantum;
348                 k++;
349
350                 break;
351         }
352
353         return k;
354 }
355
356 static uint8_t
357 prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
358                       struct otx2_nix_tm_shaper_profile *profile,
359                       volatile uint64_t *reg, volatile uint64_t *regval)
360 {
361         struct shaper_params cir, pir;
362         uint32_t schq = tm_node->hw_id;
363         uint64_t adjust = 0;
364         uint8_t k = 0;
365
366         memset(&cir, 0, sizeof(cir));
367         memset(&pir, 0, sizeof(pir));
368         shaper_config_to_nix(profile, &cir, &pir);
369
370         /* Packet length adjust */
371         if (tm_node->pkt_mode)
372                 adjust = 1;
373         else if (profile)
374                 adjust = profile->params.pkt_length_adjust & 0x1FF;
375
376         otx2_tm_dbg("Shaper config node %s(%u) lvl %u id %u, pir %" PRIu64
377                     "(%" PRIu64 "B), cir %" PRIu64 "(%" PRIu64 "B)"
378                     "adjust 0x%" PRIx64 "(pktmode %u) (%p)",
379                     nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
380                     tm_node->id, pir.rate, pir.burst, cir.rate, cir.burst,
381                     adjust, tm_node->pkt_mode, tm_node);
382
383         switch (tm_node->hw_lvl) {
384         case NIX_TXSCH_LVL_SMQ:
385                 /* Configure PIR, CIR */
386                 reg[k] = NIX_AF_MDQX_PIR(schq);
387                 regval[k] = (pir.rate && pir.burst) ?
388                                 (shaper2regval(&pir) | 1) : 0;
389                 k++;
390
391                 reg[k] = NIX_AF_MDQX_CIR(schq);
392                 regval[k] = (cir.rate && cir.burst) ?
393                                 (shaper2regval(&cir) | 1) : 0;
394                 k++;
395
396                 /* Configure RED ALG */
397                 reg[k] = NIX_AF_MDQX_SHAPE(schq);
398                 regval[k] = (adjust |
399                              (uint64_t)tm_node->red_algo << 9 |
400                              (uint64_t)tm_node->pkt_mode << 24);
401                 k++;
402                 break;
403         case NIX_TXSCH_LVL_TL4:
404                 /* Configure PIR, CIR */
405                 reg[k] = NIX_AF_TL4X_PIR(schq);
406                 regval[k] = (pir.rate && pir.burst) ?
407                                 (shaper2regval(&pir) | 1) : 0;
408                 k++;
409
410                 reg[k] = NIX_AF_TL4X_CIR(schq);
411                 regval[k] = (cir.rate && cir.burst) ?
412                                 (shaper2regval(&cir) | 1) : 0;
413                 k++;
414
415                 /* Configure RED algo */
416                 reg[k] = NIX_AF_TL4X_SHAPE(schq);
417                 regval[k] = (adjust |
418                              (uint64_t)tm_node->red_algo << 9 |
419                              (uint64_t)tm_node->pkt_mode << 24);
420                 k++;
421                 break;
422         case NIX_TXSCH_LVL_TL3:
423                 /* Configure PIR, CIR */
424                 reg[k] = NIX_AF_TL3X_PIR(schq);
425                 regval[k] = (pir.rate && pir.burst) ?
426                                 (shaper2regval(&pir) | 1) : 0;
427                 k++;
428
429                 reg[k] = NIX_AF_TL3X_CIR(schq);
430                 regval[k] = (cir.rate && cir.burst) ?
431                                 (shaper2regval(&cir) | 1) : 0;
432                 k++;
433
434                 /* Configure RED algo */
435                 reg[k] = NIX_AF_TL3X_SHAPE(schq);
436                 regval[k] = (adjust |
437                              (uint64_t)tm_node->red_algo << 9 |
438                              (uint64_t)tm_node->pkt_mode << 24);
439                 k++;
440
441                 break;
442         case NIX_TXSCH_LVL_TL2:
443                 /* Configure PIR, CIR */
444                 reg[k] = NIX_AF_TL2X_PIR(schq);
445                 regval[k] = (pir.rate && pir.burst) ?
446                                 (shaper2regval(&pir) | 1) : 0;
447                 k++;
448
449                 reg[k] = NIX_AF_TL2X_CIR(schq);
450                 regval[k] = (cir.rate && cir.burst) ?
451                                 (shaper2regval(&cir) | 1) : 0;
452                 k++;
453
454                 /* Configure RED algo */
455                 reg[k] = NIX_AF_TL2X_SHAPE(schq);
456                 regval[k] = (adjust |
457                              (uint64_t)tm_node->red_algo << 9 |
458                              (uint64_t)tm_node->pkt_mode << 24);
459                 k++;
460
461                 break;
462         case NIX_TXSCH_LVL_TL1:
463                 /* Configure CIR */
464                 reg[k] = NIX_AF_TL1X_CIR(schq);
465                 regval[k] = (cir.rate && cir.burst) ?
466                                 (shaper2regval(&cir) | 1) : 0;
467                 k++;
468
469                 /* Configure length disable and adjust */
470                 reg[k] = NIX_AF_TL1X_SHAPE(schq);
471                 regval[k] = (adjust |
472                              (uint64_t)tm_node->pkt_mode << 24);
473                 k++;
474                 break;
475         }
476
477         return k;
478 }
479
480 static uint8_t
481 prepare_tm_sw_xoff(struct otx2_nix_tm_node *tm_node, bool enable,
482                    volatile uint64_t *reg, volatile uint64_t *regval)
483 {
484         uint32_t hw_lvl = tm_node->hw_lvl;
485         uint32_t schq = tm_node->hw_id;
486         uint8_t k = 0;
487
488         otx2_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)",
489                     nix_hwlvl2str(hw_lvl), schq, tm_node->lvl,
490                     tm_node->id, enable, tm_node);
491
492         regval[k] = enable;
493
494         switch (hw_lvl) {
495         case NIX_TXSCH_LVL_MDQ:
496                 reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
497                 k++;
498                 break;
499         case NIX_TXSCH_LVL_TL4:
500                 reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
501                 k++;
502                 break;
503         case NIX_TXSCH_LVL_TL3:
504                 reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
505                 k++;
506                 break;
507         case NIX_TXSCH_LVL_TL2:
508                 reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
509                 k++;
510                 break;
511         case NIX_TXSCH_LVL_TL1:
512                 reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
513                 k++;
514                 break;
515         default:
516                 break;
517         }
518
519         return k;
520 }
521
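/*
 * Build and send one nix_txschq_cfg mailbox message that programs the
 * parent/topology, link, scheduling and shaping registers for a TM node.
 */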
522 static int
523 populate_tm_reg(struct otx2_eth_dev *dev,
524                 struct otx2_nix_tm_node *tm_node)
525 {
526         struct otx2_nix_tm_shaper_profile *profile;
527         uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
528         uint64_t regval[MAX_REGS_PER_MBOX_MSG];
529         uint64_t reg[MAX_REGS_PER_MBOX_MSG];
530         struct otx2_mbox *mbox = dev->mbox;
531         uint64_t parent = 0, child = 0;
532         uint32_t hw_lvl, rr_prio, schq;
533         struct nix_txschq_config *req;
534         int rc = -EFAULT;
535         uint8_t k = 0;
536
537         memset(regval_mask, 0, sizeof(regval_mask));
538         profile = nix_tm_shaper_profile_search(dev,
539                                         tm_node->params.shaper_profile_id);
540         rr_prio = tm_node->rr_prio;
541         hw_lvl = tm_node->hw_lvl;
542         schq = tm_node->hw_id;
543
544         /* Root node will not have a parent node */
545         if (hw_lvl == dev->otx2_tm_root_lvl)
546                 parent = tm_node->parent_hw_id;
547         else
548                 parent = tm_node->parent->hw_id;
549
550         /* When the device root is TL2, program default config for the TL1 parent */
551         if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
552             hw_lvl == dev->otx2_tm_root_lvl) {
553                 rc = populate_tm_tl1_default(dev, parent);
554                 if (rc)
555                         goto error;
556         }
557
558         if (hw_lvl != NIX_TXSCH_LVL_SMQ)
559                 child = find_prio_anchor(dev, tm_node->id);
560
561         /* Override default rr_prio when TL1
562          * Static Priority is disabled
563          */
564         if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
565             dev->tm_flags & NIX_TM_TL1_NO_SP) {
566                 rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
567                 child = 0;
568         }
569
570         otx2_tm_dbg("Topology config node %s(%u)->%s(%"PRIu64") lvl %u, id %u"
571                     " prio_anchor %"PRIu64" rr_prio %u (%p)",
572                     nix_hwlvl2str(hw_lvl), schq, nix_hwlvl2str(hw_lvl + 1),
573                     parent, tm_node->lvl, tm_node->id, child, rr_prio, tm_node);
574
575         /* Prepare Topology and Link config */
576         switch (hw_lvl) {
577         case NIX_TXSCH_LVL_SMQ:
578
579                 /* Set xoff which will be cleared later and minimum length
580                  * which will be used for zero padding if packet length is
581                  * smaller
582                  */
583                 reg[k] = NIX_AF_SMQX_CFG(schq);
584                 regval[k] = BIT_ULL(50) | ((uint64_t)NIX_MAX_VTAG_INS << 36) |
585                         NIX_MIN_HW_FRS;
586                 regval_mask[k] = ~(BIT_ULL(50) | (0x7ULL << 36) | 0x7f);
587                 k++;
588
589                 /* Parent and schedule conf */
590                 reg[k] = NIX_AF_MDQX_PARENT(schq);
591                 regval[k] = parent << 16;
592                 k++;
593
594                 break;
595         case NIX_TXSCH_LVL_TL4:
596                 /* Parent and schedule conf */
597                 reg[k] = NIX_AF_TL4X_PARENT(schq);
598                 regval[k] = parent << 16;
599                 k++;
600
601                 reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
602                 regval[k] = (child << 32) | (rr_prio << 1);
603                 k++;
604
605                 /* Configure TL4 to send to SDP channel instead of CGX/LBK */
606                 if (otx2_dev_is_sdp(dev)) {
607                         reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
608                         regval[k] = BIT_ULL(12);
609                         k++;
610                 }
611                 break;
612         case NIX_TXSCH_LVL_TL3:
613                 /* Parent and schedule conf */
614                 reg[k] = NIX_AF_TL3X_PARENT(schq);
615                 regval[k] = parent << 16;
616                 k++;
617
618                 reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
619                 regval[k] = (child << 32) | (rr_prio << 1);
620                 k++;
621
622                 /* Link configuration */
623                 if (!otx2_dev_is_sdp(dev) &&
624                     dev->link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
625                         reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
626                                                 otx2_nix_get_link(dev));
627                         regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
628                         k++;
629                 }
630
631                 break;
632         case NIX_TXSCH_LVL_TL2:
633                 /* Parent and schedule conf */
634                 reg[k] = NIX_AF_TL2X_PARENT(schq);
635                 regval[k] = parent << 16;
636                 k++;
637
638                 reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
639                 regval[k] = (child << 32) | (rr_prio << 1);
640                 k++;
641
642                 /* Link configuration */
643                 if (!otx2_dev_is_sdp(dev) &&
644                     dev->link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
645                         reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
646                                                 otx2_nix_get_link(dev));
647                         regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
648                         k++;
649                 }
650
651                 break;
652         case NIX_TXSCH_LVL_TL1:
653                 reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
654                 regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
655                 k++;
656
657                 break;
658         }
659
660         /* Prepare schedule config */
661         k += prepare_tm_sched_reg(dev, tm_node, &reg[k], &regval[k]);
662
663         /* Prepare shaping config */
664         k += prepare_tm_shaper_reg(tm_node, profile, &reg[k], &regval[k]);
665
666         if (!k)
667                 return 0;
668
669         /* Copy and send config mbox */
670         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
671         req->lvl = hw_lvl;
672         req->num_regs = k;
673
674         otx2_mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
675         otx2_mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
676         otx2_mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);
677
678         rc = otx2_mbox_process(mbox);
679         if (rc)
680                 goto error;
681
682         return 0;
683 error:
684         otx2_err("Txschq cfg request failed for node %p, rc=%d", tm_node, rc);
685         return rc;
686 }
687
688
689 static int
690 nix_tm_txsch_reg_config(struct otx2_eth_dev *dev)
691 {
692         struct otx2_nix_tm_node *tm_node;
693         uint32_t hw_lvl;
694         int rc = 0;
695
696         for (hw_lvl = 0; hw_lvl <= dev->otx2_tm_root_lvl; hw_lvl++) {
697                 TAILQ_FOREACH(tm_node, &dev->node_list, node) {
698                         if (tm_node->hw_lvl == hw_lvl &&
699                             tm_node->hw_lvl != NIX_TXSCH_LVL_CNT) {
700                                 rc = populate_tm_reg(dev, tm_node);
701                                 if (rc)
702                                         goto exit;
703                         }
704                 }
705         }
706 exit:
707         return rc;
708 }
709
710 static struct otx2_nix_tm_node *
711 nix_tm_node_search(struct otx2_eth_dev *dev,
712                    uint32_t node_id, bool user)
713 {
714         struct otx2_nix_tm_node *tm_node;
715
716         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
717                 if (tm_node->id == node_id &&
718                     (user == !!(tm_node->flags & NIX_TM_NODE_USER)))
719                         return tm_node;
720         }
721         return NULL;
722 }
723
724 static uint32_t
725 check_rr(struct otx2_eth_dev *dev, uint32_t priority, uint32_t parent_id)
726 {
727         struct otx2_nix_tm_node *tm_node;
728         uint32_t rr_num = 0;
729
730         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
731                 if (!tm_node->parent)
732                         continue;
733
734                 if (tm_node->parent->id != parent_id)
735                         continue;
736
737                 if (tm_node->priority == priority)
738                         rr_num++;
739         }
740         return rr_num;
741 }
742
743 static int
744 nix_tm_update_parent_info(struct otx2_eth_dev *dev)
745 {
746         struct otx2_nix_tm_node *tm_node_child;
747         struct otx2_nix_tm_node *tm_node;
748         struct otx2_nix_tm_node *parent;
749         uint32_t rr_num = 0;
750         uint32_t priority;
751
752         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
753                 if (!tm_node->parent)
754                         continue;
755                 /* Count the group of same-priority children, i.e. the RR group */
756                 parent = tm_node->parent;
757                 priority = tm_node->priority;
758                 rr_num = check_rr(dev, priority, parent->id);
759
760                 /* Assume that multiple RR groups are not configured,
761                  * as per the reported capabilities.
762                  */
763                 if (rr_num > 1) {
764                         parent->rr_prio = priority;
765                         parent->rr_num = rr_num;
766                 }
767
768                 /* Find out static priority children that are not in RR */
769                 TAILQ_FOREACH(tm_node_child, &dev->node_list, node) {
770                         if (!tm_node_child->parent)
771                                 continue;
772                         if (parent->id != tm_node_child->parent->id)
773                                 continue;
774                         if (parent->max_prio == UINT32_MAX &&
775                             tm_node_child->priority != parent->rr_prio)
776                                 parent->max_prio = 0;
777
778                         if (parent->max_prio < tm_node_child->priority &&
779                             parent->rr_prio != tm_node_child->priority)
780                                 parent->max_prio = tm_node_child->priority;
781                 }
782         }
783
784         return 0;
785 }
786
787 static int
788 nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,
789                         uint32_t parent_node_id, uint32_t priority,
790                         uint32_t weight, uint16_t hw_lvl,
791                         uint16_t lvl, bool user,
792                         struct rte_tm_node_params *params)
793 {
794         struct otx2_nix_tm_shaper_profile *profile;
795         struct otx2_nix_tm_node *tm_node, *parent_node;
796         uint32_t profile_id;
797
798         profile_id = params->shaper_profile_id;
799         profile = nix_tm_shaper_profile_search(dev, profile_id);
800
801         parent_node = nix_tm_node_search(dev, parent_node_id, user);
802
803         tm_node = rte_zmalloc("otx2_nix_tm_node",
804                               sizeof(struct otx2_nix_tm_node), 0);
805         if (!tm_node)
806                 return -ENOMEM;
807
808         tm_node->lvl = lvl;
809         tm_node->hw_lvl = hw_lvl;
810
811         /* Maintain minimum weight */
812         if (!weight)
813                 weight = 1;
814
815         tm_node->id = node_id;
816         tm_node->priority = priority;
817         tm_node->weight = weight;
818         tm_node->rr_prio = 0xf;
819         tm_node->max_prio = UINT32_MAX;
820         tm_node->hw_id = UINT32_MAX;
821         tm_node->flags = 0;
822         if (user)
823                 tm_node->flags = NIX_TM_NODE_USER;
824
825         /* Packet mode */
826         if (!nix_tm_is_leaf(dev, lvl) &&
827             ((profile && profile->params.packet_mode) ||
828              (params->nonleaf.wfq_weight_mode &&
829               params->nonleaf.n_sp_priorities &&
830               !params->nonleaf.wfq_weight_mode[0])))
831                 tm_node->pkt_mode = 1;
832
833         rte_memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
834
835         if (profile)
836                 profile->reference_count++;
837
838         tm_node->parent = parent_node;
839         tm_node->parent_hw_id = UINT32_MAX;
840         shaper_default_red_algo(dev, tm_node, profile);
841
842         TAILQ_INSERT_TAIL(&dev->node_list, tm_node, node);
843
844         return 0;
845 }
846
847 static int
848 nix_tm_clear_shaper_profiles(struct otx2_eth_dev *dev)
849 {
850         struct otx2_nix_tm_shaper_profile *shaper_profile;
851
852         while ((shaper_profile = TAILQ_FIRST(&dev->shaper_profile_list))) {
853                 if (shaper_profile->reference_count)
854                         otx2_tm_dbg("Shaper profile %u has non zero references",
855                                     shaper_profile->shaper_profile_id);
856                 TAILQ_REMOVE(&dev->shaper_profile_list, shaper_profile, shaper);
857                 rte_free(shaper_profile);
858         }
859
860         return 0;
861 }
862
863 static int
864 nix_clear_path_xoff(struct otx2_eth_dev *dev,
865                     struct otx2_nix_tm_node *tm_node)
866 {
867         struct nix_txschq_config *req;
868         struct otx2_nix_tm_node *p;
869         int rc;
870
871         /* Manipulating SW_XOFF not supported on Ax */
872         if (otx2_dev_is_Ax(dev))
873                 return 0;
874
875         /* Enable nodes in path for flush to succeed */
876         if (!nix_tm_is_leaf(dev, tm_node->lvl))
877                 p = tm_node;
878         else
879                 p = tm_node->parent;
880         while (p) {
881                 if (!(p->flags & NIX_TM_NODE_ENABLED) &&
882                     (p->flags & NIX_TM_NODE_HWRES)) {
883                         req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
884                         req->lvl = p->hw_lvl;
885                         req->num_regs = prepare_tm_sw_xoff(p, false, req->reg,
886                                                            req->regval);
887                         rc = otx2_mbox_process(dev->mbox);
888                         if (rc)
889                                 return rc;
890
891                         p->flags |= NIX_TM_NODE_ENABLED;
892                 }
893                 p = p->parent;
894         }
895
896         return 0;
897 }
898
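/*
 * Set or clear the SMQ flush/xoff state via NIX_AF_SMQX_CFG: enabling
 * sets bits 49 and 50, disabling clears only bit 50. The path above the
 * SMQ is first un-xoffed via nix_clear_path_xoff() so a flush can make
 * progress.
 */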
899 static int
900 nix_smq_xoff(struct otx2_eth_dev *dev,
901              struct otx2_nix_tm_node *tm_node,
902              bool enable)
903 {
904         struct otx2_mbox *mbox = dev->mbox;
905         struct nix_txschq_config *req;
906         uint16_t smq;
907         int rc;
908
909         smq = tm_node->hw_id;
910         otx2_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
911                     enable ? "enable" : "disable");
912
913         rc = nix_clear_path_xoff(dev, tm_node);
914         if (rc)
915                 return rc;
916
917         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
918         req->lvl = NIX_TXSCH_LVL_SMQ;
919         req->num_regs = 1;
920
921         req->reg[0] = NIX_AF_SMQX_CFG(smq);
922         req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
923         req->regval_mask[0] = enable ?
924                                 ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);
925
926         return otx2_mbox_process(mbox);
927 }
928
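/*
 * Toggle flow control on the SQ's SQB aura and re-seed the cached fc_mem
 * count: the live aura count when enabling, nb_sqb_bufs when disabling.
 */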
929 int
930 otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable)
931 {
932         struct otx2_eth_txq *txq = __txq;
933         struct npa_aq_enq_req *req;
934         struct npa_aq_enq_rsp *rsp;
935         struct otx2_npa_lf *lf;
936         struct otx2_mbox *mbox;
937         uint64_t aura_handle;
938         int rc;
939
940         otx2_tm_dbg("Setting SQ %u SQB aura FC to %s", txq->sq,
941                     enable ? "enable" : "disable");
942
943         lf = otx2_npa_lf_obj_get();
944         if (!lf)
945                 return -EFAULT;
946         mbox = lf->mbox;
947         /* Set/clear sqb aura fc_ena */
948         aura_handle = txq->sqb_pool->pool_id;
949         req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
950
951         req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
952         req->ctype = NPA_AQ_CTYPE_AURA;
953         req->op = NPA_AQ_INSTOP_WRITE;
954         /* Below is not needed for aura writes but AF driver needs it */
955         /* AF will translate to associated poolctx */
956         req->aura.pool_addr = req->aura_id;
957
958         req->aura.fc_ena = enable;
959         req->aura_mask.fc_ena = 1;
960
961         rc = otx2_mbox_process(mbox);
962         if (rc)
963                 return rc;
964
965         /* Read back npa aura ctx */
966         req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
967
968         req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
969         req->ctype = NPA_AQ_CTYPE_AURA;
970         req->op = NPA_AQ_INSTOP_READ;
971
972         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
973         if (rc)
974                 return rc;
975
976         /* Initialize fc_mem when enabling, as there might be no further triggers */
977         if (enable)
978                 *(volatile uint64_t *)txq->fc_mem = rsp->aura.count;
979         else
980                 *(volatile uint64_t *)txq->fc_mem = txq->nb_sqb_bufs;
981         /* Sync write barrier */
982         rte_wmb();
983
984         return 0;
985 }
986
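/*
 * Poll NIX_LF_SQ_OP_STATUS until the SQ is quiescent: SQB count <= 1,
 * head equals tail and the flow-control memory shows all SQB buffers
 * returned. Gives up (and dumps TM state) after a timeout derived from
 * the queue depth and the minimum shaper rate.
 */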
987 static int
988 nix_txq_flush_sq_spin(struct otx2_eth_txq *txq)
989 {
990         uint16_t sqb_cnt, head_off, tail_off;
991         struct otx2_eth_dev *dev = txq->dev;
992         uint64_t wdata, val, prev;
993         uint16_t sq = txq->sq;
994         int64_t *regaddr;
995         uint64_t timeout;/* 10's of usec */
996
997         /* Wait for enough time based on shaper min rate */
998         timeout = (txq->qconf.nb_desc * NIX_MAX_HW_FRS * 8 * 1E5);
999         timeout = timeout / dev->tm_rate_min;
1000         if (!timeout)
1001                 timeout = 10000;
1002
1003         wdata = ((uint64_t)sq << 32);
1004         regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS);
1005         val = otx2_atomic64_add_nosync(wdata, regaddr);
1006
1007         /* Spin multiple iterations as "txq->fc_cache_pkts" can still
1008          * have space to send pkts even though fc_mem is disabled
1009          */
1010
1011         while (true) {
1012                 prev = val;
1013                 rte_delay_us(10);
1014                 val = otx2_atomic64_add_nosync(wdata, regaddr);
1015                 /* Continue on error */
1016                 if (val & BIT_ULL(63))
1017                         continue;
1018
1019                 if (prev != val)
1020                         continue;
1021
1022                 sqb_cnt = val & 0xFFFF;
1023                 head_off = (val >> 20) & 0x3F;
1024                 tail_off = (val >> 28) & 0x3F;
1025
1026                 /* SQ reached quiescent state */
1027                 if (sqb_cnt <= 1 && head_off == tail_off &&
1028                     (*txq->fc_mem == txq->nb_sqb_bufs)) {
1029                         break;
1030                 }
1031
1032                 /* Timeout */
1033                 if (!timeout)
1034                         goto exit;
1035                 timeout--;
1036         }
1037
1038         return 0;
1039 exit:
1040         otx2_nix_tm_dump(dev);
1041         return -EFAULT;
1042 }
1043
1044 /* Flush and disable tx queue and its parent SMQ */
1045 int otx2_nix_sq_flush_pre(void *_txq, bool dev_started)
1046 {
1047         struct otx2_nix_tm_node *tm_node, *sibling;
1048         struct otx2_eth_txq *txq;
1049         struct otx2_eth_dev *dev;
1050         uint16_t sq;
1051         bool user;
1052         int rc;
1053
1054         txq = _txq;
1055         dev = txq->dev;
1056         sq = txq->sq;
1057
1058         user = !!(dev->tm_flags & NIX_TM_COMMITTED);
1059
1060         /* Find the node for this SQ */
1061         tm_node = nix_tm_node_search(dev, sq, user);
1062         if (!tm_node || !(tm_node->flags & NIX_TM_NODE_ENABLED)) {
1063                 otx2_err("Invalid node/state for sq %u", sq);
1064                 return -EFAULT;
1065         }
1066
1067         /* Enable CGX RXTX to drain pkts */
1068         if (!dev_started) {
1069                 /* Though this enables both RX MCAM entries and the CGX link,
1070                  * we assume all the Rx queues were already stopped.
1071                  */
1072                 otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
1073                 rc = otx2_mbox_process(dev->mbox);
1074                 if (rc) {
1075                         otx2_err("cgx start failed, rc=%d", rc);
1076                         return rc;
1077                 }
1078         }
1079
1080         /* Disable SMQ xoff in case it was enabled earlier */
1081         rc = nix_smq_xoff(dev, tm_node->parent, false);
1082         if (rc) {
1083                 otx2_err("Failed to enable smq %u, rc=%d",
1084                          tm_node->parent->hw_id, rc);
1085                 return rc;
1086         }
1087
1088         /* As per the HRM, to disable an SQ, all other SQs
1089          * that feed the same SMQ must be paused before the SMQ flush.
1090          */
1091         TAILQ_FOREACH(sibling, &dev->node_list, node) {
1092                 if (sibling->parent != tm_node->parent)
1093                         continue;
1094                 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
1095                         continue;
1096
1097                 sq = sibling->id;
1098                 txq = dev->eth_dev->data->tx_queues[sq];
1099                 if (!txq)
1100                         continue;
1101
1102                 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
1103                 if (rc) {
1104                         otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
1105                         goto cleanup;
1106                 }
1107
1108                 /* Wait for sq entries to be flushed */
1109                 rc = nix_txq_flush_sq_spin(txq);
1110                 if (rc) {
1111                         otx2_err("Failed to drain sq %u, rc=%d", txq->sq, rc);
1112                         return rc;
1113                 }
1114         }
1115
1116         tm_node->flags &= ~NIX_TM_NODE_ENABLED;
1117
1118         /* Disable and flush */
1119         rc = nix_smq_xoff(dev, tm_node->parent, true);
1120         if (rc) {
1121                 otx2_err("Failed to disable smq %u, rc=%d",
1122                          tm_node->parent->hw_id, rc);
1123                 goto cleanup;
1124         }
1125 cleanup:
1126         /* Restore cgx state */
1127         if (!dev_started) {
1128                 otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
1129                 rc |= otx2_mbox_process(dev->mbox);
1130         }
1131
1132         return rc;
1133 }
1134
1135 int otx2_nix_sq_flush_post(void *_txq)
1136 {
1137         struct otx2_nix_tm_node *tm_node, *sibling;
1138         struct otx2_eth_txq *txq = _txq;
1139         struct otx2_eth_txq *s_txq;
1140         struct otx2_eth_dev *dev;
1141         bool once = false;
1142         uint16_t sq, s_sq;
1143         bool user;
1144         int rc;
1145
1146         dev = txq->dev;
1147         sq = txq->sq;
1148         user = !!(dev->tm_flags & NIX_TM_COMMITTED);
1149
1150         /* Find the node for this SQ */
1151         tm_node = nix_tm_node_search(dev, sq, user);
1152         if (!tm_node) {
1153                 otx2_err("Invalid node for sq %u", sq);
1154                 return -EFAULT;
1155         }
1156
1157         /* Enable all the siblings back */
1158         TAILQ_FOREACH(sibling, &dev->node_list, node) {
1159                 if (sibling->parent != tm_node->parent)
1160                         continue;
1161
1162                 if (sibling->id == sq)
1163                         continue;
1164
1165                 if (!(sibling->flags & NIX_TM_NODE_ENABLED))
1166                         continue;
1167
1168                 s_sq = sibling->id;
1169                 s_txq = dev->eth_dev->data->tx_queues[s_sq];
1170                 if (!s_txq)
1171                         continue;
1172
1173                 if (!once) {
1174                         /* Enable back if any SQ is still present */
1175                         rc = nix_smq_xoff(dev, tm_node->parent, false);
1176                         if (rc) {
1177                                 otx2_err("Failed to enable smq %u, rc=%d",
1178                                          tm_node->parent->hw_id, rc);
1179                                 return rc;
1180                         }
1181                         once = true;
1182                 }
1183
1184                 rc = otx2_nix_sq_sqb_aura_fc(s_txq, true);
1185                 if (rc) {
1186                         otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
1187                         return rc;
1188                 }
1189         }
1190
1191         return 0;
1192 }
1193
1194 static int
1195 nix_sq_sched_data(struct otx2_eth_dev *dev,
1196                   struct otx2_nix_tm_node *tm_node,
1197                   bool rr_quantum_only)
1198 {
1199         struct rte_eth_dev *eth_dev = dev->eth_dev;
1200         struct otx2_mbox *mbox = dev->mbox;
1201         uint16_t sq = tm_node->id, smq;
1202         struct nix_aq_enq_req *req;
1203         uint64_t rr_quantum;
1204         int rc;
1205
1206         smq = tm_node->parent->hw_id;
1207         rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
1208
1209         if (rr_quantum_only)
1210                 otx2_tm_dbg("Update sq(%u) rr_quantum 0x%"PRIx64, sq, rr_quantum);
1211         else
1212                 otx2_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%"PRIx64,
1213                             sq, smq, rr_quantum);
1214
1215         if (sq > eth_dev->data->nb_tx_queues)
1216                 return -EFAULT;
1217
1218         req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
1219         req->qidx = sq;
1220         req->ctype = NIX_AQ_CTYPE_SQ;
1221         req->op = NIX_AQ_INSTOP_WRITE;
1222
1223         /* smq update only when needed */
1224         if (!rr_quantum_only) {
1225                 req->sq.smq = smq;
1226                 req->sq_mask.smq = ~req->sq_mask.smq;
1227         }
1228         req->sq.smq_rr_quantum = rr_quantum;
1229         req->sq_mask.smq_rr_quantum = ~req->sq_mask.smq_rr_quantum;
1230
1231         rc = otx2_mbox_process(mbox);
1232         if (rc)
1233                 otx2_err("Failed to set smq, rc=%d", rc);
1234         return rc;
1235 }
1236
1237 int otx2_nix_sq_enable(void *_txq)
1238 {
1239         struct otx2_eth_txq *txq = _txq;
1240         int rc;
1241
1242         /* Enable sqb_aura fc */
1243         rc = otx2_nix_sq_sqb_aura_fc(txq, true);
1244         if (rc) {
1245                 otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
1246                 return rc;
1247         }
1248
1249         return 0;
1250 }
1251
1252 static int
1253 nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask,
1254                       uint32_t flags, bool hw_only)
1255 {
1256         struct otx2_nix_tm_shaper_profile *profile;
1257         struct otx2_nix_tm_node *tm_node, *next_node;
1258         struct otx2_mbox *mbox = dev->mbox;
1259         struct nix_txsch_free_req *req;
1260         uint32_t profile_id;
1261         int rc = 0;
1262
1263         next_node = TAILQ_FIRST(&dev->node_list);
1264         while (next_node) {
1265                 tm_node = next_node;
1266                 next_node = TAILQ_NEXT(tm_node, node);
1267
1268                 /* Check for only requested nodes */
1269                 if ((tm_node->flags & flags_mask) != flags)
1270                         continue;
1271
1272                 if (!nix_tm_is_leaf(dev, tm_node->lvl) &&
1273                     tm_node->hw_lvl != NIX_TXSCH_LVL_TL1 &&
1274                     tm_node->flags & NIX_TM_NODE_HWRES) {
1275                         /* Free specific HW resource */
1276                         otx2_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
1277                                     nix_hwlvl2str(tm_node->hw_lvl),
1278                                     tm_node->hw_id, tm_node->lvl,
1279                                     tm_node->id, tm_node);
1280
1281                         rc = nix_clear_path_xoff(dev, tm_node);
1282                         if (rc)
1283                                 return rc;
1284
1285                         req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
1286                         req->flags = 0;
1287                         req->schq_lvl = tm_node->hw_lvl;
1288                         req->schq = tm_node->hw_id;
1289                         rc = otx2_mbox_process(mbox);
1290                         if (rc)
1291                                 return rc;
1292                         tm_node->flags &= ~NIX_TM_NODE_HWRES;
1293                 }
1294
1295                 /* Leave software elements if needed */
1296                 if (hw_only)
1297                         continue;
1298
1299                 otx2_tm_dbg("Free node lvl %u id %u (%p)",
1300                             tm_node->lvl, tm_node->id, tm_node);
1301
1302                 profile_id = tm_node->params.shaper_profile_id;
1303                 profile = nix_tm_shaper_profile_search(dev, profile_id);
1304                 if (profile)
1305                         profile->reference_count--;
1306
1307                 TAILQ_REMOVE(&dev->node_list, tm_node, node);
1308                 rte_free(tm_node);
1309         }
1310
1311         if (!flags_mask) {
1312                 /* Free all hw resources */
1313                 req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
1314                 req->flags = TXSCHQ_FREE_ALL;
1315
1316                 return otx2_mbox_process(mbox);
1317         }
1318
1319         return rc;
1320 }
1321
1322 static uint8_t
1323 nix_tm_copy_rsp_to_dev(struct otx2_eth_dev *dev,
1324                        struct nix_txsch_alloc_rsp *rsp)
1325 {
1326         uint16_t schq;
1327         uint8_t lvl;
1328
1329         for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1330                 for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) {
1331                         dev->txschq_list[lvl][schq] = rsp->schq_list[lvl][schq];
1332                         dev->txschq_contig_list[lvl][schq] =
1333                                 rsp->schq_contig_list[lvl][schq];
1334                 }
1335
1336                 dev->txschq[lvl] = rsp->schq[lvl];
1337                 dev->txschq_contig[lvl] = rsp->schq_contig[lvl];
1338         }
1339         return 0;
1340 }
1341
1342 static int
1343 nix_tm_assign_id_to_node(struct otx2_eth_dev *dev,
1344                          struct otx2_nix_tm_node *child,
1345                          struct otx2_nix_tm_node *parent)
1346 {
1347         uint32_t hw_id, schq_con_index, prio_offset;
1348         uint32_t l_id, schq_index;
1349
1350         otx2_tm_dbg("Assign hw id for child node %s lvl %u id %u (%p)",
1351                     nix_hwlvl2str(child->hw_lvl), child->lvl, child->id, child);
1352
1353         child->flags |= NIX_TM_NODE_HWRES;
1354
1355         /* Process root nodes */
1356         if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
1357             child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
1358                 int idx = 0;
1359                 uint32_t tschq_con_index;
1360
1361                 l_id = child->hw_lvl;
1362                 tschq_con_index = dev->txschq_contig_index[l_id];
1363                 hw_id = dev->txschq_contig_list[l_id][tschq_con_index];
1364                 child->hw_id = hw_id;
1365                 dev->txschq_contig_index[l_id]++;
1366                 /* Update TL1 hw_id for its parent for config purpose */
1367                 idx = dev->txschq_index[NIX_TXSCH_LVL_TL1]++;
1368                 hw_id = dev->txschq_list[NIX_TXSCH_LVL_TL1][idx];
1369                 child->parent_hw_id = hw_id;
1370                 return 0;
1371         }
1372         if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL1 &&
1373             child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
1374                 uint32_t tschq_con_index;
1375
1376                 l_id = child->hw_lvl;
1377                 tschq_con_index = dev->txschq_index[l_id];
1378                 hw_id = dev->txschq_list[l_id][tschq_con_index];
1379                 child->hw_id = hw_id;
1380                 dev->txschq_index[l_id]++;
1381                 return 0;
1382         }
1383
1384         /* Process children with parents */
1385         l_id = child->hw_lvl;
1386         schq_index = dev->txschq_index[l_id];
1387         schq_con_index = dev->txschq_contig_index[l_id];
1388
1389         if (child->priority == parent->rr_prio) {
1390                 hw_id = dev->txschq_list[l_id][schq_index];
1391                 child->hw_id = hw_id;
1392                 child->parent_hw_id = parent->hw_id;
1393                 dev->txschq_index[l_id]++;
1394         } else {
1395                 prio_offset = schq_con_index + child->priority;
1396                 hw_id = dev->txschq_contig_list[l_id][prio_offset];
1397                 child->hw_id = hw_id;
1398         }
1399         return 0;
1400 }
1401
1402 static int
1403 nix_tm_assign_hw_id(struct otx2_eth_dev *dev)
1404 {
1405         struct otx2_nix_tm_node *parent, *child;
1406         uint32_t child_hw_lvl, con_index_inc, i;
1407
1408         for (i = NIX_TXSCH_LVL_TL1; i > 0; i--) {
1409                 TAILQ_FOREACH(parent, &dev->node_list, node) {
1410                         child_hw_lvl = parent->hw_lvl - 1;
1411                         if (parent->hw_lvl != i)
1412                                 continue;
1413                         TAILQ_FOREACH(child, &dev->node_list, node) {
1414                                 if (!child->parent)
1415                                         continue;
1416                                 if (child->parent->id != parent->id)
1417                                         continue;
1418                                 nix_tm_assign_id_to_node(dev, child, parent);
1419                         }
1420
1421                         con_index_inc = parent->max_prio + 1;
1422                         dev->txschq_contig_index[child_hw_lvl] += con_index_inc;
1423
1424                         /*
1425                          * Explicitly assign id to parent node if it
1426                          * doesn't have a parent
1427                          */
1428                         if (parent->hw_lvl == dev->otx2_tm_root_lvl)
1429                                 nix_tm_assign_id_to_node(dev, parent, NULL);
1430                 }
1431         }
1432         return 0;
1433 }
1434
1435 static uint8_t
1436 nix_tm_count_req_schq(struct otx2_eth_dev *dev,
1437                       struct nix_txsch_alloc_req *req, uint8_t lvl)
1438 {
1439         struct otx2_nix_tm_node *tm_node;
1440         uint8_t contig_count;
1441
1442         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1443                 if (lvl == tm_node->hw_lvl) {
1444                         req->schq[lvl - 1] += tm_node->rr_num;
1445                         if (tm_node->max_prio != UINT32_MAX) {
1446                                 contig_count = tm_node->max_prio + 1;
1447                                 req->schq_contig[lvl - 1] += contig_count;
1448                         }
1449                 }
1450                 if (lvl == dev->otx2_tm_root_lvl &&
1451                     dev->otx2_tm_root_lvl && lvl == NIX_TXSCH_LVL_TL2 &&
1452                     tm_node->hw_lvl == dev->otx2_tm_root_lvl) {
1453                         req->schq_contig[dev->otx2_tm_root_lvl]++;
1454                 }
1455         }
1456
1457         req->schq[NIX_TXSCH_LVL_TL1] = 1;
1458         req->schq_contig[NIX_TXSCH_LVL_TL1] = 0;
1459
1460         return 0;
1461 }
1462
1463 static int
1464 nix_tm_prepare_txschq_req(struct otx2_eth_dev *dev,
1465                           struct nix_txsch_alloc_req *req)
1466 {
1467         uint8_t i;
1468
1469         for (i = NIX_TXSCH_LVL_TL1; i > 0; i--)
1470                 nix_tm_count_req_schq(dev, req, i);
1471
1472         for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
1473                 dev->txschq_index[i] = 0;
1474                 dev->txschq_contig_index[i] = 0;
1475         }
1476         return 0;
1477 }
1478
1479 static int
1480 nix_tm_send_txsch_alloc_msg(struct otx2_eth_dev *dev)
1481 {
1482         struct otx2_mbox *mbox = dev->mbox;
1483         struct nix_txsch_alloc_req *req;
1484         struct nix_txsch_alloc_rsp *rsp;
1485         int rc;
1486
1487         req = otx2_mbox_alloc_msg_nix_txsch_alloc(mbox);
1488
1489         rc = nix_tm_prepare_txschq_req(dev, req);
1490         if (rc)
1491                 return rc;
1492
1493         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1494         if (rc)
1495                 return rc;
1496
1497         nix_tm_copy_rsp_to_dev(dev, rsp);
1498         dev->link_cfg_lvl = rsp->link_cfg_lvl;
1499
1500         nix_tm_assign_hw_id(dev);
1501         return 0;
1502 }
1503
1504 static int
1505 nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)
1506 {
1507         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1508         struct otx2_nix_tm_node *tm_node;
1509         struct otx2_eth_txq *txq;
1510         uint16_t sq;
1511         int rc;
1512
1513         nix_tm_update_parent_info(dev);
1514
1515         rc = nix_tm_send_txsch_alloc_msg(dev);
1516         if (rc) {
1517                 otx2_err("TM failed to alloc tm resources=%d", rc);
1518                 return rc;
1519         }
1520
1521         rc = nix_tm_txsch_reg_config(dev);
1522         if (rc) {
1523                 otx2_err("TM failed to configure sched registers=%d", rc);
1524                 return rc;
1525         }
1526
1527         /* Trigger MTU recalculate as SMQ needs MTU conf */
1528         if (eth_dev->data->dev_started && eth_dev->data->nb_rx_queues) {
1529                 rc = otx2_nix_recalc_mtu(eth_dev);
1530                 if (rc) {
1531                         otx2_err("TM MTU update failed, rc=%d", rc);
1532                         return rc;
1533                 }
1534         }
1535
1536         /* Mark all non-leaf nodes as enabled */
1537         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1538                 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1539                         tm_node->flags |= NIX_TM_NODE_ENABLED;
1540         }
1541
1542         if (!xmit_enable)
1543                 return 0;
1544
1545         /* Update SQ Sched Data while SQ is idle */
1546         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1547                 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1548                         continue;
1549
1550                 rc = nix_sq_sched_data(dev, tm_node, false);
1551                 if (rc) {
1552                         otx2_err("SQ %u sched update failed, rc=%d",
1553                                  tm_node->id, rc);
1554                         return rc;
1555                 }
1556         }
1557
1558         /* Finally XON all SMQ's */
1559         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1560                 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1561                         continue;
1562
1563                 rc = nix_smq_xoff(dev, tm_node, false);
1564                 if (rc) {
1565                         otx2_err("Failed to enable smq %u, rc=%d",
1566                                  tm_node->hw_id, rc);
1567                         return rc;
1568                 }
1569         }
1570
1571         /* Enable xmit as all the topology is ready */
1572         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1573                 if (!nix_tm_is_leaf(dev, tm_node->lvl))
1574                         continue;
1575
1576                 sq = tm_node->id;
1577                 txq = eth_dev->data->tx_queues[sq];
1578
1579                 rc = otx2_nix_sq_enable(txq);
1580                 if (rc) {
1581                         otx2_err("TM sw xon failed on SQ %u, rc=%d",
1582                                  tm_node->id, rc);
1583                         return rc;
1584                 }
1585                 tm_node->flags |= NIX_TM_NODE_ENABLED;
1586         }
1587
1588         return 0;
1589 }
1590
1591 static int
1592 send_tm_reqval(struct otx2_mbox *mbox,
1593                struct nix_txschq_config *req,
1594                struct rte_tm_error *error)
1595 {
1596         int rc;
1597
1598         if (!req->num_regs ||
1599             req->num_regs > MAX_REGS_PER_MBOX_MSG) {
1600                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1601                 error->message = "invalid config";
1602                 return -EIO;
1603         }
1604
1605         rc = otx2_mbox_process(mbox);
1606         if (rc) {
1607                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1608                 error->message = "unexpected fatal error";
1609         }
1610         return rc;
1611 }
1612
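/* Map an rte_tm level to the corresponding NIX TX schedule HW level.
 * With TL1 access the root maps to TL1 and the tree spans TL1..SMQ;
 * otherwise the root maps to TL2 and the hierarchy is one level shorter.
 * NIX_TXSCH_LVL_CNT is returned for levels that do not map to hardware.
 */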
1613 static uint16_t
1614 nix_tm_lvl2nix(struct otx2_eth_dev *dev, uint32_t lvl)
1615 {
1616         if (nix_tm_have_tl1_access(dev)) {
1617                 switch (lvl) {
1618                 case OTX2_TM_LVL_ROOT:
1619                         return NIX_TXSCH_LVL_TL1;
1620                 case OTX2_TM_LVL_SCH1:
1621                         return NIX_TXSCH_LVL_TL2;
1622                 case OTX2_TM_LVL_SCH2:
1623                         return NIX_TXSCH_LVL_TL3;
1624                 case OTX2_TM_LVL_SCH3:
1625                         return NIX_TXSCH_LVL_TL4;
1626                 case OTX2_TM_LVL_SCH4:
1627                         return NIX_TXSCH_LVL_SMQ;
1628                 default:
1629                         return NIX_TXSCH_LVL_CNT;
1630                 }
1631         } else {
1632                 switch (lvl) {
1633                 case OTX2_TM_LVL_ROOT:
1634                         return NIX_TXSCH_LVL_TL2;
1635                 case OTX2_TM_LVL_SCH1:
1636                         return NIX_TXSCH_LVL_TL3;
1637                 case OTX2_TM_LVL_SCH2:
1638                         return NIX_TXSCH_LVL_TL4;
1639                 case OTX2_TM_LVL_SCH3:
1640                         return NIX_TXSCH_LVL_SMQ;
1641                 default:
1642                         return NIX_TXSCH_LVL_CNT;
1643                 }
1644         }
1645 }
1646
1647 static uint16_t
1648 nix_max_prio(struct otx2_eth_dev *dev, uint16_t hw_lvl)
1649 {
1650         if (hw_lvl >= NIX_TXSCH_LVL_CNT)
1651                 return 0;
1652
1653         /* MDQ doesn't support SP */
1654         if (hw_lvl == NIX_TXSCH_LVL_MDQ)
1655                 return 0;
1656
1657         /* PF's TL1 with VFs enabled doesn't support SP */
1658         if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
1659             (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
1660              (dev->tm_flags & NIX_TM_TL1_NO_SP)))
1661                 return 0;
1662
1663         return TXSCH_TLX_SP_PRIO_MAX - 1;
1664 }
1665
1666
1667 static int
1668 validate_prio(struct otx2_eth_dev *dev, uint32_t lvl,
1669               uint32_t parent_id, uint32_t priority,
1670               struct rte_tm_error *error)
1671 {
1672         uint8_t priorities[TXSCH_TLX_SP_PRIO_MAX];
1673         struct otx2_nix_tm_node *tm_node;
1674         uint32_t rr_num = 0;
1675         int i;
1676
1677         /* Validate priority against max */
1678         if (priority > nix_max_prio(dev, nix_tm_lvl2nix(dev, lvl - 1))) {
1679                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
1680                 error->message = "unsupported priority value";
1681                 return -EINVAL;
1682         }
1683
1684         if (parent_id == RTE_TM_NODE_ID_NULL)
1685                 return 0;
1686
1687         memset(priorities, 0, TXSCH_TLX_SP_PRIO_MAX);
1688         priorities[priority] = 1;
1689
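        /* Count how many existing user-created siblings already occupy each
         * priority slot; a slot used more than once denotes the DWRR group.
         */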
1690         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1691                 if (!tm_node->parent)
1692                         continue;
1693
1694                 if (!(tm_node->flags & NIX_TM_NODE_USER))
1695                         continue;
1696
1697                 if (tm_node->parent->id != parent_id)
1698                         continue;
1699
1700                 priorities[tm_node->priority]++;
1701         }
1702
1703         for (i = 0; i < TXSCH_TLX_SP_PRIO_MAX; i++)
1704                 if (priorities[i] > 1)
1705                         rr_num++;
1706
1707         /* At most one RR group per parent */
1708         if (rr_num > 1) {
1709                 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
1710                 error->message = "multiple DWRR node priority";
1711                 return -EINVAL;
1712         }
1713
1714         /* Check for previous priority to avoid holes in priorities */
1715         if (priority && !priorities[priority - 1]) {
1716                 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
1717                 error->message = "priority not in order";
1718                 return -EINVAL;
1719         }
1720
1721         return 0;
1722 }
1723
1724 static int
1725 read_tm_reg(struct otx2_mbox *mbox, uint64_t reg,
1726             uint64_t *regval, uint32_t hw_lvl)
1727 {
1728         volatile struct nix_txschq_config *req;
1729         struct nix_txschq_config *rsp;
1730         int rc;
1731
1732         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
1733         req->read = 1;
1734         req->lvl = hw_lvl;
1735         req->reg[0] = reg;
1736         req->num_regs = 1;
1737
1738         rc = otx2_mbox_process_msg(mbox, (void **)&rsp);
1739         if (rc)
1740                 return rc;
1741         *regval = rsp->regval[0];
1742         return 0;
1743 }
1744
1745 /* Search for min rate in topology */
1746 static void
1747 nix_tm_shaper_profile_update_min(struct otx2_eth_dev *dev)
1748 {
1749         struct otx2_nix_tm_shaper_profile *profile;
1750         uint64_t rate_min = 1E9; /* 1 Gbps */
1751
1752         TAILQ_FOREACH(profile, &dev->shaper_profile_list, shaper) {
1753                 if (profile->params.peak.rate &&
1754                     profile->params.peak.rate < rate_min)
1755                         rate_min = profile->params.peak.rate;
1756
1757                 if (profile->params.committed.rate &&
1758                     profile->params.committed.rate < rate_min)
1759                         rate_min = profile->params.committed.rate;
1760         }
1761
1762         dev->tm_rate_min = rate_min;
1763 }
1764
1765 static int
1766 nix_xmit_disable(struct rte_eth_dev *eth_dev)
1767 {
1768         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1769         uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
1770         uint16_t sqb_cnt, head_off, tail_off;
1771         struct otx2_nix_tm_node *tm_node;
1772         struct otx2_eth_txq *txq;
1773         uint64_t wdata, val;
1774         int i, rc = 0;
1775
1776         otx2_tm_dbg("Disabling xmit on %s", eth_dev->data->name);
1777
1778         /* Enable CGX RXTX to drain pkts */
1779         if (!eth_dev->data->dev_started) {
1780                 otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
1781                 rc = otx2_mbox_process(dev->mbox);
1782                 if (rc)
1783                         return rc;
1784         }
1785
1786         /* XON all SMQ's */
1787         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1788                 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1789                         continue;
1790                 if (!(tm_node->flags & NIX_TM_NODE_HWRES))
1791                         continue;
1792
1793                 rc = nix_smq_xoff(dev, tm_node, false);
1794                 if (rc) {
1795                         otx2_err("Failed to enable smq %u, rc=%d",
1796                                  tm_node->hw_id, rc);
1797                         goto cleanup;
1798                 }
1799         }
1800
1801         /* Flush all tx queues */
1802         for (i = 0; i < sq_cnt; i++) {
1803                 txq = eth_dev->data->tx_queues[i];
1804
1805                 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
1806                 if (rc) {
1807                         otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
1808                         goto cleanup;
1809                 }
1810
1811                 /* Wait for sq entries to be flushed */
1812                 rc = nix_txq_flush_sq_spin(txq);
1813                 if (rc) {
1814                         otx2_err("Failed to drain sq, rc=%d", rc);
1815                         goto cleanup;
1816                 }
1817         }
1818
1819         /* XOFF & Flush all SMQ's. HRM mandates
1820          * all SQ's empty before SMQ flush is issued.
1821          */
1822         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
1823                 if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
1824                         continue;
1825                 if (!(tm_node->flags & NIX_TM_NODE_HWRES))
1826                         continue;
1827
1828                 rc = nix_smq_xoff(dev, tm_node, true);
1829                 if (rc) {
1830                         otx2_err("Failed to disable smq %u, rc=%d",
1831                                  tm_node->hw_id, rc);
1832                         goto cleanup;
1833                 }
1834         }
1835
1836         /* Verify sanity of all tx queues */
1837         for (i = 0; i < sq_cnt; i++) {
1838                 txq = eth_dev->data->tx_queues[i];
1839
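                /* Read NIX_LF_SQ_OP_STATUS atomically with the SQ id in the
                 * upper word of the operand, then decode the in-use SQB count
                 * and head/tail offsets to confirm the queue drained cleanly.
                 */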
1840                 wdata = ((uint64_t)txq->sq << 32);
1841                 val = otx2_atomic64_add_nosync(wdata,
1842                                (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS));
1843
1844                 sqb_cnt = val & 0xFFFF;
1845                 head_off = (val >> 20) & 0x3F;
1846                 tail_off = (val >> 28) & 0x3F;
1847
1848                 if (sqb_cnt > 1 || head_off != tail_off ||
1849                     (*txq->fc_mem != txq->nb_sqb_bufs))
1850                         otx2_err("Failed to gracefully flush sq %u", txq->sq);
1851         }
1852
1853 cleanup:
1854         /* Restore CGX state */
1855         if (!eth_dev->data->dev_started) {
1856                 otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
1857                 rc |= otx2_mbox_process(dev->mbox);
1858         }
1859
1860         return rc;
1861 }
1862
1863 static int
1864 otx2_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
1865                           int *is_leaf, struct rte_tm_error *error)
1866 {
1867         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1868         struct otx2_nix_tm_node *tm_node;
1869
1870         if (is_leaf == NULL) {
1871                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1872                 return -EINVAL;
1873         }
1874
1875         tm_node = nix_tm_node_search(dev, node_id, true);
1876         if (node_id == RTE_TM_NODE_ID_NULL || !tm_node) {
1877                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
1878                 return -EINVAL;
1879         }
1880         if (nix_tm_is_leaf(dev, tm_node->lvl))
1881                 *is_leaf = true;
1882         else
1883                 *is_leaf = false;
1884         return 0;
1885 }
1886
1887 static int
1888 otx2_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
1889                      struct rte_tm_capabilities *cap,
1890                      struct rte_tm_error *error)
1891 {
1892         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1893         struct otx2_mbox *mbox = dev->mbox;
1894         int rc, max_nr_nodes = 0, i;
1895         struct free_rsrcs_rsp *rsp;
1896
1897         memset(cap, 0, sizeof(*cap));
1898
1899         otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
1900         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1901         if (rc) {
1902                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1903                 error->message = "unexpected fatal error";
1904                 return rc;
1905         }
1906
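        /* Total non-leaf nodes available is the sum of free schedule queues
         * across all HW levels below TL1 (TL1 itself is reserved for the PF).
         */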
1907         for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
1908                 max_nr_nodes += rsp->schq[i];
1909
1910         cap->n_nodes_max = max_nr_nodes + dev->tm_leaf_cnt;
1911         /* TL1 level is reserved for PF */
1912         cap->n_levels_max = nix_tm_have_tl1_access(dev) ?
1913                                 OTX2_TM_LVL_MAX : OTX2_TM_LVL_MAX - 1;
1914         cap->non_leaf_nodes_identical = 1;
1915         cap->leaf_nodes_identical = 1;
1916
1917         /* Shaper Capabilities */
1918         cap->shaper_private_n_max = max_nr_nodes;
1919         cap->shaper_n_max = max_nr_nodes;
1920         cap->shaper_private_dual_rate_n_max = max_nr_nodes;
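        /* rte_tm expresses shaper rates in bytes/sec while the shaper limits
         * in this driver are tracked in bits/sec, hence the divide by 8.
         */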
1921         cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1922         cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1923         cap->shaper_private_packet_mode_supported = 1;
1924         cap->shaper_private_byte_mode_supported = 1;
1925         cap->shaper_pkt_length_adjust_min = NIX_LENGTH_ADJUST_MIN;
1926         cap->shaper_pkt_length_adjust_max = NIX_LENGTH_ADJUST_MAX;
1927
1928         /* Schedule Capabilities */
1929         cap->sched_n_children_max = rsp->schq[NIX_TXSCH_LVL_MDQ];
1930         cap->sched_sp_n_priorities_max = TXSCH_TLX_SP_PRIO_MAX;
1931         cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
1932         cap->sched_wfq_n_groups_max = 1;
1933         cap->sched_wfq_weight_max = MAX_SCHED_WEIGHT;
1934         cap->sched_wfq_packet_mode_supported = 1;
1935         cap->sched_wfq_byte_mode_supported = 1;
1936
1937         cap->dynamic_update_mask =
1938                 RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
1939                 RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
1940         cap->stats_mask =
1941                 RTE_TM_STATS_N_PKTS |
1942                 RTE_TM_STATS_N_BYTES |
1943                 RTE_TM_STATS_N_PKTS_RED_DROPPED |
1944                 RTE_TM_STATS_N_BYTES_RED_DROPPED;
1945
1946         for (i = 0; i < RTE_COLORS; i++) {
1947                 cap->mark_vlan_dei_supported[i] = false;
1948                 cap->mark_ip_ecn_tcp_supported[i] = false;
1949                 cap->mark_ip_dscp_supported[i] = false;
1950         }
1951
1952         return 0;
1953 }
1954
1955 static int
1956 otx2_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
1957                                    struct rte_tm_level_capabilities *cap,
1958                                    struct rte_tm_error *error)
1959 {
1960         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1961         struct otx2_mbox *mbox = dev->mbox;
1962         struct free_rsrcs_rsp *rsp;
1963         uint16_t hw_lvl;
1964         int rc;
1965
1966         memset(cap, 0, sizeof(*cap));
1967
1968         otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
1969         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1970         if (rc) {
1971                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
1972                 error->message = "unexpected fatal error";
1973                 return rc;
1974         }
1975
1976         hw_lvl = nix_tm_lvl2nix(dev, lvl);
1977
1978         if (nix_tm_is_leaf(dev, lvl)) {
1979                 /* Leaf */
1980                 cap->n_nodes_max = dev->tm_leaf_cnt;
1981                 cap->n_nodes_leaf_max = dev->tm_leaf_cnt;
1982                 cap->leaf_nodes_identical = 1;
1983                 cap->leaf.stats_mask =
1984                         RTE_TM_STATS_N_PKTS |
1985                         RTE_TM_STATS_N_BYTES;
1986
1987         } else if (lvl == OTX2_TM_LVL_ROOT) {
1988                 /* Root node, aka TL2(vf)/TL1(pf) */
1989                 cap->n_nodes_max = 1;
1990                 cap->n_nodes_nonleaf_max = 1;
1991                 cap->non_leaf_nodes_identical = 1;
1992
1993                 cap->nonleaf.shaper_private_supported = true;
1994                 cap->nonleaf.shaper_private_dual_rate_supported =
1995                         nix_tm_have_tl1_access(dev) ? false : true;
1996                 cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
1997                 cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
1998                 cap->nonleaf.shaper_private_packet_mode_supported = 1;
1999                 cap->nonleaf.shaper_private_byte_mode_supported = 1;
2000
2001                 cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
2002                 cap->nonleaf.sched_sp_n_priorities_max =
2003                                         nix_max_prio(dev, hw_lvl) + 1;
2004                 cap->nonleaf.sched_wfq_n_groups_max = 1;
2005                 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
2006                 cap->nonleaf.sched_wfq_packet_mode_supported = 1;
2007                 cap->nonleaf.sched_wfq_byte_mode_supported = 1;
2008
2009                 if (nix_tm_have_tl1_access(dev))
2010                         cap->nonleaf.stats_mask =
2011                                 RTE_TM_STATS_N_PKTS_RED_DROPPED |
2012                                 RTE_TM_STATS_N_BYTES_RED_DROPPED;
2013         } else if ((lvl < OTX2_TM_LVL_MAX) &&
2014                    (hw_lvl < NIX_TXSCH_LVL_CNT)) {
2015                 /* TL2, TL3, TL4, MDQ */
2016                 cap->n_nodes_max = rsp->schq[hw_lvl];
2017                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
2018                 cap->non_leaf_nodes_identical = 1;
2019
2020                 cap->nonleaf.shaper_private_supported = true;
2021                 cap->nonleaf.shaper_private_dual_rate_supported = true;
2022                 cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
2023                 cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
2024                 cap->nonleaf.shaper_private_packet_mode_supported = 1;
2025                 cap->nonleaf.shaper_private_byte_mode_supported = 1;
2026
2027                 /* MDQ doesn't support Strict Priority */
2028                 if (hw_lvl == NIX_TXSCH_LVL_MDQ)
2029                         cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
2030                 else
2031                         cap->nonleaf.sched_n_children_max =
2032                                 rsp->schq[hw_lvl - 1];
2033                 cap->nonleaf.sched_sp_n_priorities_max =
2034                         nix_max_prio(dev, hw_lvl) + 1;
2035                 cap->nonleaf.sched_wfq_n_groups_max = 1;
2036                 cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
2037                 cap->nonleaf.sched_wfq_packet_mode_supported = 1;
2038                 cap->nonleaf.sched_wfq_byte_mode_supported = 1;
2039         } else {
2040                 /* unsupported level */
2041                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2042                 return -EINVAL;
2043         }
2044         return 0;
2045 }
2046
2047 static int
2048 otx2_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
2049                           struct rte_tm_node_capabilities *cap,
2050                           struct rte_tm_error *error)
2051 {
2052         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2053         struct otx2_mbox *mbox = dev->mbox;
2054         struct otx2_nix_tm_node *tm_node;
2055         struct free_rsrcs_rsp *rsp;
2056         int rc, hw_lvl, lvl;
2057
2058         memset(cap, 0, sizeof(*cap));
2059
2060         tm_node = nix_tm_node_search(dev, node_id, true);
2061         if (!tm_node) {
2062                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2063                 error->message = "no such node";
2064                 return -EINVAL;
2065         }
2066
2067         hw_lvl = tm_node->hw_lvl;
2068         lvl = tm_node->lvl;
2069
2070         /* Leaf node */
2071         if (nix_tm_is_leaf(dev, lvl)) {
2072                 cap->stats_mask = RTE_TM_STATS_N_PKTS |
2073                                         RTE_TM_STATS_N_BYTES;
2074                 return 0;
2075         }
2076
2077         otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
2078         rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
2079         if (rc) {
2080                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2081                 error->message = "unexpected fatal error";
2082                 return rc;
2083         }
2084
2085         /* Non Leaf Shaper */
2086         cap->shaper_private_supported = true;
2087         cap->shaper_private_dual_rate_supported =
2088                 (hw_lvl == NIX_TXSCH_LVL_TL1) ? false : true;
2089         cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
2090         cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
2091         cap->shaper_private_packet_mode_supported = 1;
2092         cap->shaper_private_byte_mode_supported = 1;
2093
2094         /* Non Leaf Scheduler */
2095         if (hw_lvl == NIX_TXSCH_LVL_MDQ)
2096                 cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
2097         else
2098                 cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
2099
2100         cap->nonleaf.sched_sp_n_priorities_max = nix_max_prio(dev, hw_lvl) + 1;
2101         cap->nonleaf.sched_wfq_n_children_per_group_max =
2102                 cap->nonleaf.sched_n_children_max;
2103         cap->nonleaf.sched_wfq_n_groups_max = 1;
2104         cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
2105         cap->nonleaf.sched_wfq_packet_mode_supported = 1;
2106         cap->nonleaf.sched_wfq_byte_mode_supported = 1;
2107
2108         if (hw_lvl == NIX_TXSCH_LVL_TL1)
2109                 cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
2110                         RTE_TM_STATS_N_BYTES_RED_DROPPED;
2111         return 0;
2112 }
2113
2114 static int
2115 otx2_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev,
2116                                uint32_t profile_id,
2117                                struct rte_tm_shaper_params *params,
2118                                struct rte_tm_error *error)
2119 {
2120         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2121         struct otx2_nix_tm_shaper_profile *profile;
2122
2123         profile = nix_tm_shaper_profile_search(dev, profile_id);
2124         if (profile) {
2125                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2126                 error->message = "shaper profile ID already exists";
2127                 return -EINVAL;
2128         }
2129
2130         /* Committed rate and burst size can be enabled/disabled */
2131         if (params->committed.size || params->committed.rate) {
2132                 if (params->committed.size < MIN_SHAPER_BURST ||
2133                     params->committed.size > MAX_SHAPER_BURST) {
2134                         error->type =
2135                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
2136                         return -EINVAL;
2137                 } else if (!shaper_rate_to_nix(params->committed.rate * 8,
2138                                                NULL, NULL, NULL)) {
2139                         error->type =
2140                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
2141                         error->message = "shaper committed rate invalid";
2142                         return -EINVAL;
2143                 }
2144         }
2145
2146         /* Peak rate and burst size can be enabled/disabled */
2147         if (params->peak.size || params->peak.rate) {
2148                 if (params->peak.size < MIN_SHAPER_BURST ||
2149                     params->peak.size > MAX_SHAPER_BURST) {
2150                         error->type =
2151                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
2152                         return -EINVAL;
2153                 } else if (!shaper_rate_to_nix(params->peak.rate * 8,
2154                                                NULL, NULL, NULL)) {
2155                         error->type =
2156                                 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
2157                         error->message = "shaper peak rate invalid";
2158                         return -EINVAL;
2159                 }
2160         }
2161
2162         if (params->pkt_length_adjust < NIX_LENGTH_ADJUST_MIN ||
2163             params->pkt_length_adjust > NIX_LENGTH_ADJUST_MAX) {
2164                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
2165                 error->message = "length adjust invalid";
2166                 return -EINVAL;
2167         }
2168
2169         profile = rte_zmalloc("otx2_nix_tm_shaper_profile",
2170                               sizeof(struct otx2_nix_tm_shaper_profile), 0);
2171         if (!profile)
2172                 return -ENOMEM;
2173
2174         profile->shaper_profile_id = profile_id;
2175         rte_memcpy(&profile->params, params,
2176                    sizeof(struct rte_tm_shaper_params));
2177         TAILQ_INSERT_TAIL(&dev->shaper_profile_list, profile, shaper);
2178
2179         otx2_tm_dbg("Added TM shaper profile %u, "
2180                     " pir %" PRIu64 " , pbs %" PRIu64 ", cir %" PRIu64
2181                     ", cbs %" PRIu64 " , adj %u, pkt mode %d",
2182                     profile_id,
2183                     params->peak.rate * 8,
2184                     params->peak.size,
2185                     params->committed.rate * 8,
2186                     params->committed.size,
2187                     params->pkt_length_adjust,
2188                     params->packet_mode);
2189
2190         /* Translate rates to bits per second */
2191         profile->params.peak.rate = profile->params.peak.rate * 8;
2192         profile->params.committed.rate = profile->params.committed.rate * 8;
2193         /* Always use PIR for single rate shaping */
2194         if (!params->peak.rate && params->committed.rate) {
2195                 profile->params.peak = profile->params.committed;
2196                 memset(&profile->params.committed, 0,
2197                        sizeof(profile->params.committed));
2198         }
2199
2200         /* update min rate */
2201         nix_tm_shaper_profile_update_min(dev);
2202         return 0;
2203 }
2204
2205 static int
2206 otx2_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
2207                                   uint32_t profile_id,
2208                                   struct rte_tm_error *error)
2209 {
2210         struct otx2_nix_tm_shaper_profile *profile;
2211         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2212
2213         profile = nix_tm_shaper_profile_search(dev, profile_id);
2214
2215         if (!profile) {
2216                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2217                 error->message = "shaper profile ID does not exist";
2218                 return -EINVAL;
2219         }
2220
2221         if (profile->reference_count) {
2222                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
2223                 error->message = "shaper profile in use";
2224                 return -EINVAL;
2225         }
2226
2227         otx2_tm_dbg("Removing TM shaper profile %u", profile_id);
2228         TAILQ_REMOVE(&dev->shaper_profile_list, profile, shaper);
2229         rte_free(profile);
2230
2231         /* update min rate */
2232         nix_tm_shaper_profile_update_min(dev);
2233         return 0;
2234 }
2235
2236 static int
2237 otx2_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
2238                      uint32_t parent_node_id, uint32_t priority,
2239                      uint32_t weight, uint32_t lvl,
2240                      struct rte_tm_node_params *params,
2241                      struct rte_tm_error *error)
2242 {
2243         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2244         struct otx2_nix_tm_shaper_profile *profile = NULL;
2245         struct otx2_nix_tm_node *parent_node;
2246         int rc, pkt_mode, clear_on_fail = 0;
2247         uint32_t exp_next_lvl, i;
2248         uint32_t profile_id;
2249         uint16_t hw_lvl;
2250
2251         /* we don't support dynamic updates */
2252         if (dev->tm_flags & NIX_TM_COMMITTED) {
2253                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2254                 error->message = "dynamic update not supported";
2255                 return -EIO;
2256         }
2257
2258         /* Leaf nodes must all have priority 0 */
2259         if (nix_tm_is_leaf(dev, lvl) && priority != 0) {
2260                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2261                 error->message = "queue shapers must be priority 0";
2262                 return -EIO;
2263         }
2264
2265         parent_node = nix_tm_node_search(dev, parent_node_id, true);
2266
2267         /* find the right level */
2268         if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
2269                 if (parent_node_id == RTE_TM_NODE_ID_NULL) {
2270                         lvl = OTX2_TM_LVL_ROOT;
2271                 } else if (parent_node) {
2272                         lvl = parent_node->lvl + 1;
2273                 } else {
2274                         /* Neither a proper parent nor a proper level id given */
2275                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2276                         error->message = "invalid parent node id";
2277                         return -ERANGE;
2278                 }
2279         }
2280
2281         /* Translate rte_tm level id's to nix hw level id's */
2282         hw_lvl = nix_tm_lvl2nix(dev, lvl);
2283         if (hw_lvl == NIX_TXSCH_LVL_CNT &&
2284             !nix_tm_is_leaf(dev, lvl)) {
2285                 error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
2286                 error->message = "invalid level id";
2287                 return -ERANGE;
2288         }
2289
2290         if (node_id < dev->tm_leaf_cnt)
2291                 exp_next_lvl = NIX_TXSCH_LVL_SMQ;
2292         else
2293                 exp_next_lvl = hw_lvl + 1;
2294
2295         /* Check if there is no parent node yet */
2296         if (hw_lvl != dev->otx2_tm_root_lvl &&
2297             (!parent_node || parent_node->hw_lvl != exp_next_lvl)) {
2298                 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2299                 error->message = "invalid parent node id";
2300                 return -EINVAL;
2301         }
2302
2303         /* Check if a node already exists */
2304         if (nix_tm_node_search(dev, node_id, true)) {
2305                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2306                 error->message = "node already exists";
2307                 return -EINVAL;
2308         }
2309
2310         if (!nix_tm_is_leaf(dev, lvl)) {
2311                 /* Check if shaper profile exists for non leaf node */
2312                 profile_id = params->shaper_profile_id;
2313                 profile = nix_tm_shaper_profile_search(dev, profile_id);
2314                 if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE && !profile) {
2315                         error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2316                         error->message = "invalid shaper profile";
2317                         return -EINVAL;
2318                 }
2319
2320                 /* Minimum static priority count is 1 */
2321                 if (!params->nonleaf.n_sp_priorities ||
2322                     params->nonleaf.n_sp_priorities > TXSCH_TLX_SP_PRIO_MAX) {
2323                         error->type =
2324                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
2325                         error->message = "invalid sp priorities";
2326                         return -EINVAL;
2327                 }
2328
2329                 pkt_mode = 0;
2330                 /* Validate weight mode */
2331                 for (i = 0; i < params->nonleaf.n_sp_priorities &&
2332                      params->nonleaf.wfq_weight_mode; i++) {
2333                         pkt_mode = !params->nonleaf.wfq_weight_mode[i];
2334                         if (pkt_mode == !params->nonleaf.wfq_weight_mode[0])
2335                                 continue;
2336
2337                         error->type =
2338                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
2339                         error->message = "unsupported weight mode";
2340                         return -EINVAL;
2341                 }
2342
2343                 if (profile && params->nonleaf.n_sp_priorities &&
2344                     pkt_mode != profile->params.packet_mode) {
2345                         error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
2346                         error->message = "shaper wfq packet mode mismatch";
2347                         return -EINVAL;
2348                 }
2349         }
2350
2351         /* Check for a second DWRR group among siblings or holes in the priorities */
2352         if (validate_prio(dev, lvl, parent_node_id, priority, error))
2353                 return -EINVAL;
2354
2355         if (weight > MAX_SCHED_WEIGHT) {
2356                 error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
2357                 error->message = "max weight exceeded";
2358                 return -EINVAL;
2359         }
2360
2361         rc = nix_tm_node_add_to_list(dev, node_id, parent_node_id,
2362                                      priority, weight, hw_lvl,
2363                                      lvl, true, params);
2364         if (rc) {
2365                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2366                 /* cleanup user added nodes */
2367                 if (clear_on_fail)
2368                         nix_tm_free_resources(dev, NIX_TM_NODE_USER,
2369                                               NIX_TM_NODE_USER, false);
2370                 error->message = "failed to add node";
2371                 return rc;
2372         }
2373         error->type = RTE_TM_ERROR_TYPE_NONE;
2374         return 0;
2375 }
2376
2377 static int
2378 otx2_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id,
2379                         struct rte_tm_error *error)
2380 {
2381         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2382         struct otx2_nix_tm_node *tm_node, *child_node;
2383         struct otx2_nix_tm_shaper_profile *profile;
2384         uint32_t profile_id;
2385
2386         /* we don't support dynamic updates yet */
2387         if (dev->tm_flags & NIX_TM_COMMITTED) {
2388                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
2389                 error->message = "hierarchy exists";
2390                 return -EIO;
2391         }
2392
2393         if (node_id == RTE_TM_NODE_ID_NULL) {
2394                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2395                 error->message = "invalid node id";
2396                 return -EINVAL;
2397         }
2398
2399         tm_node = nix_tm_node_search(dev, node_id, true);
2400         if (!tm_node) {
2401                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2402                 error->message = "no such node";
2403                 return -EINVAL;
2404         }
2405
2406         /* Check for any existing children */
2407         TAILQ_FOREACH(child_node, &dev->node_list, node) {
2408                 if (child_node->parent == tm_node) {
2409                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2410                         error->message = "children exist";
2411                         return -EINVAL;
2412                 }
2413         }
2414
2415         /* Remove shaper profile reference */
2416         profile_id = tm_node->params.shaper_profile_id;
2417         profile = nix_tm_shaper_profile_search(dev, profile_id);
2418         if (profile)
                     profile->reference_count--;
2419
2420         TAILQ_REMOVE(&dev->node_list, tm_node, node);
2421         rte_free(tm_node);
2422         return 0;
2423 }
2424
2425 static int
2426 nix_tm_node_suspend_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
2427                            struct rte_tm_error *error, bool suspend)
2428 {
2429         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2430         struct otx2_mbox *mbox = dev->mbox;
2431         struct otx2_nix_tm_node *tm_node;
2432         struct nix_txschq_config *req;
2433         uint16_t flags;
2434         int rc;
2435
2436         tm_node = nix_tm_node_search(dev, node_id, true);
2437         if (!tm_node) {
2438                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2439                 error->message = "no such node";
2440                 return -EINVAL;
2441         }
2442
2443         if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
2444                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2445                 error->message = "hierarchy doesn't exist";
2446                 return -EINVAL;
2447         }
2448
2449         flags = tm_node->flags;
2450         flags = suspend ? (flags & ~NIX_TM_NODE_ENABLED) :
2451                 (flags | NIX_TM_NODE_ENABLED);
2452
2453         if (tm_node->flags == flags)
2454                 return 0;
2455
2456         /* send mbox for state change */
2457         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2458
2459         req->lvl = tm_node->hw_lvl;
2460         req->num_regs = prepare_tm_sw_xoff(tm_node, suspend,
2461                                            req->reg, req->regval);
2462         rc = send_tm_reqval(mbox, req, error);
2463         if (!rc)
2464                 tm_node->flags = flags;
2465         return rc;
2466 }
2467
2468 static int
2469 otx2_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id,
2470                          struct rte_tm_error *error)
2471 {
2472         return nix_tm_node_suspend_resume(eth_dev, node_id, error, true);
2473 }
2474
2475 static int
2476 otx2_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
2477                         struct rte_tm_error *error)
2478 {
2479         return nix_tm_node_suspend_resume(eth_dev, node_id, error, false);
2480 }
2481
2482 static int
2483 otx2_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
2484                              int clear_on_fail,
2485                              struct rte_tm_error *error)
2486 {
2487         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2488         struct otx2_nix_tm_node *tm_node;
2489         uint32_t leaf_cnt = 0;
2490         int rc;
2491
2492         if (dev->tm_flags & NIX_TM_COMMITTED) {
2493                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2494                 error->message = "hierarchy exists";
2495                 return -EINVAL;
2496         }
2497
2498         /* Check if we have all the leaf nodes */
2499         TAILQ_FOREACH(tm_node, &dev->node_list, node) {
2500                 if (tm_node->flags & NIX_TM_NODE_USER &&
2501                     tm_node->id < dev->tm_leaf_cnt)
2502                         leaf_cnt++;
2503         }
2504
2505         if (leaf_cnt != dev->tm_leaf_cnt) {
2506                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2507                 error->message = "incomplete hierarchy";
2508                 return -EINVAL;
2509         }
2510
2511         /*
2512          * Disable xmit; it will be re-enabled when the
2513          * new topology is in place.
2514          */
2515         rc = nix_xmit_disable(eth_dev);
2516         if (rc) {
2517                 otx2_err("failed to disable TX, rc=%d", rc);
2518                 return -EIO;
2519         }
2520
2521         /* Delete default/ratelimit tree */
2522         if (dev->tm_flags & (NIX_TM_DEFAULT_TREE | NIX_TM_RATE_LIMIT_TREE)) {
2523                 rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false);
2524                 if (rc) {
2525                         error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2526                         error->message = "failed to free default resources";
2527                         return rc;
2528                 }
2529                 dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE |
2530                                    NIX_TM_RATE_LIMIT_TREE);
2531         }
2532
2533         /* Free up user alloc'ed resources */
2534         rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER,
2535                                    NIX_TM_NODE_USER, true);
2536         if (rc) {
2537                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2538                 error->message = "failed to free user resources";
2539                 return rc;
2540         }
2541
2542         rc = nix_tm_alloc_resources(eth_dev, true);
2543         if (rc) {
2544                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2545                 error->message = "alloc resources failed";
2546                 /* TODO should we restore default config ? */
2547                 if (clear_on_fail)
2548                         nix_tm_free_resources(dev, 0, 0, false);
2549                 return rc;
2550         }
2551
2552         error->type = RTE_TM_ERROR_TYPE_NONE;
2553         dev->tm_flags |= NIX_TM_COMMITTED;
2554         return 0;
2555 }
2556
2557 static int
2558 otx2_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev,
2559                                uint32_t node_id,
2560                                uint32_t profile_id,
2561                                struct rte_tm_error *error)
2562 {
2563         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2564         struct otx2_nix_tm_shaper_profile *profile = NULL;
2565         struct otx2_mbox *mbox = dev->mbox;
2566         struct otx2_nix_tm_node *tm_node;
2567         struct nix_txschq_config *req;
2568         uint8_t k;
2569         int rc;
2570
2571         tm_node = nix_tm_node_search(dev, node_id, true);
2572         if (!tm_node || nix_tm_is_leaf(dev, tm_node->lvl)) {
2573                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2574                 error->message = "invalid node";
2575                 return -EINVAL;
2576         }
2577
2578         if (profile_id == tm_node->params.shaper_profile_id)
2579                 return 0;
2580
2581         if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
2582                 profile = nix_tm_shaper_profile_search(dev, profile_id);
2583                 if (!profile) {
2584                         error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2585                         error->message = "shaper profile ID does not exist";
2586                         return -EINVAL;
2587                 }
2588         }
2589
2590         if (profile && profile->params.packet_mode != tm_node->pkt_mode) {
2591                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
2592                 error->message = "shaper profile pkt mode mismatch";
2593                 return -EINVAL;
2594         }
2595
2596         tm_node->params.shaper_profile_id = profile_id;
2597
2598         /* Nothing to do if not yet committed */
2599         if (!(dev->tm_flags & NIX_TM_COMMITTED))
2600                 return 0;
2601
2602         tm_node->flags &= ~NIX_TM_NODE_ENABLED;
2603
2604         /* Flush the specific node with SW_XOFF */
2605         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2606         req->lvl = tm_node->hw_lvl;
2607         k = prepare_tm_sw_xoff(tm_node, true, req->reg, req->regval);
2608         req->num_regs = k;
2609
2610         rc = send_tm_reqval(mbox, req, error);
2611         if (rc)
2612                 return rc;
2613
2614         shaper_default_red_algo(dev, tm_node, profile);
2615
2616         /* Update the PIR/CIR and clear SW XOFF */
2617         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
2618         req->lvl = tm_node->hw_lvl;
2619
2620         k = prepare_tm_shaper_reg(tm_node, profile, req->reg, req->regval);
2621
2622         k += prepare_tm_sw_xoff(tm_node, false, &req->reg[k], &req->regval[k]);
2623
2624         req->num_regs = k;
2625         rc = send_tm_reqval(mbox, req, error);
2626         if (!rc)
2627                 tm_node->flags |= NIX_TM_NODE_ENABLED;
2628         return rc;
2629 }
2630
2631 static int
2632 otx2_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev,
2633                                uint32_t node_id, uint32_t new_parent_id,
2634                                uint32_t priority, uint32_t weight,
2635                                struct rte_tm_error *error)
2636 {
2637         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2638         struct otx2_nix_tm_node *tm_node, *sibling;
2639         struct otx2_nix_tm_node *new_parent;
2640         struct nix_txschq_config *req;
2641         uint8_t k;
2642         int rc;
2643
2644         if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
2645                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2646                 error->message = "hierarchy doesn't exist";
2647                 return -EINVAL;
2648         }
2649
2650         tm_node = nix_tm_node_search(dev, node_id, true);
2651         if (!tm_node) {
2652                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2653                 error->message = "no such node";
2654                 return -EINVAL;
2655         }
2656
2657         /* Parent id valid only for non root nodes */
2658         if (tm_node->hw_lvl != dev->otx2_tm_root_lvl) {
2659                 new_parent = nix_tm_node_search(dev, new_parent_id, true);
2660                 if (!new_parent) {
2661                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2662                         error->message = "no such parent node";
2663                         return -EINVAL;
2664                 }
2665
2666                 /* Current support is only for dynamic weight update */
2667                 if (tm_node->parent != new_parent ||
2668                     tm_node->priority != priority) {
2669                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
2670                         error->message = "only weight update supported";
2671                         return -EINVAL;
2672                 }
2673         }
2674
2675         /* Skip if no change */
2676         if (tm_node->weight == weight)
2677                 return 0;
2678
2679         tm_node->weight = weight;
2680
2681         /* For leaf nodes, SQ CTX needs update */
2682         if (nix_tm_is_leaf(dev, tm_node->lvl)) {
2683                 /* Update SQ quantum data on the fly */
2684                 rc = nix_sq_sched_data(dev, tm_node, true);
2685                 if (rc) {
2686                         error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2687                         error->message = "sq sched data update failed";
2688                         return rc;
2689                 }
2690         } else {
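                /* Non-leaf DWRR weights live in TX schedule registers, so
                 * quiesce the parent and the whole sibling set with SW_XOFF,
                 * rewrite the scheduling register, then re-enable everything
                 * in reverse order.
                 */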
2691                 /* XOFF Parent node */
2692                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2693                 req->lvl = tm_node->parent->hw_lvl;
2694                 req->num_regs = prepare_tm_sw_xoff(tm_node->parent, true,
2695                                                    req->reg, req->regval);
2696                 rc = send_tm_reqval(dev->mbox, req, error);
2697                 if (rc)
2698                         return rc;
2699
2700                 /* XOFF this node and all other siblings */
2701                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2702                 req->lvl = tm_node->hw_lvl;
2703
2704                 k = 0;
2705                 TAILQ_FOREACH(sibling, &dev->node_list, node) {
2706                         if (sibling->parent != tm_node->parent)
2707                                 continue;
2708                         k += prepare_tm_sw_xoff(sibling, true, &req->reg[k],
2709                                                 &req->regval[k]);
2710                 }
2711                 req->num_regs = k;
2712                 rc = send_tm_reqval(dev->mbox, req, error);
2713                 if (rc)
2714                         return rc;
2715
2716                 /* Update new weight for current node */
2717                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2718                 req->lvl = tm_node->hw_lvl;
2719                 req->num_regs = prepare_tm_sched_reg(dev, tm_node,
2720                                                      req->reg, req->regval);
2721                 rc = send_tm_reqval(dev->mbox, req, error);
2722                 if (rc)
2723                         return rc;
2724
2725                 /* XON this node and all other siblings */
2726                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2727                 req->lvl = tm_node->hw_lvl;
2728
2729                 k = 0;
2730                 TAILQ_FOREACH(sibling, &dev->node_list, node) {
2731                         if (sibling->parent != tm_node->parent)
2732                                 continue;
2733                         k += prepare_tm_sw_xoff(sibling, false, &req->reg[k],
2734                                                 &req->regval[k]);
2735                 }
2736                 req->num_regs = k;
2737                 rc = send_tm_reqval(dev->mbox, req, error);
2738                 if (rc)
2739                         return rc;
2740
2741                 /* XON Parent node */
2742                 req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
2743                 req->lvl = tm_node->parent->hw_lvl;
2744                 req->num_regs = prepare_tm_sw_xoff(tm_node->parent, false,
2745                                                    req->reg, req->regval);
2746                 rc = send_tm_reqval(dev->mbox, req, error);
2747                 if (rc)
2748                         return rc;
2749         }
2750         return 0;
2751 }
2752
2753 static int
2754 otx2_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
2755                             struct rte_tm_node_stats *stats,
2756                             uint64_t *stats_mask, int clear,
2757                             struct rte_tm_error *error)
2758 {
2759         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2760         struct otx2_nix_tm_node *tm_node;
2761         uint64_t reg, val;
2762         int64_t *addr;
2763         int rc = 0;
2764
2765         tm_node = nix_tm_node_search(dev, node_id, true);
2766         if (!tm_node) {
2767                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2768                 error->message = "no such node";
2769                 return -EINVAL;
2770         }
2771
2772         /* Stats are supported only for leaf nodes and the TL1 root */
2773         if (nix_tm_is_leaf(dev, tm_node->lvl)) {
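                /* SQ stats are read via atomic LF ops with the SQ id in the
                 * upper 32 bits of the operand; counters are reported relative
                 * to the last snapshot taken on clear.
                 */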
2774                 reg = (((uint64_t)tm_node->id) << 32);
2775
2776                 /* Packets */
2777                 addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);
2778                 val = otx2_atomic64_add_nosync(reg, addr);
2779                 if (val & OP_ERR)
2780                         val = 0;
2781                 stats->n_pkts = val - tm_node->last_pkts;
2782
2783                 /* Bytes */
2784                 addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_OCTS);
2785                 val = otx2_atomic64_add_nosync(reg, addr);
2786                 if (val & OP_ERR)
2787                         val = 0;
2788                 stats->n_bytes = val - tm_node->last_bytes;
2789
2790                 if (clear) {
2791                         tm_node->last_pkts = stats->n_pkts;
2792                         tm_node->last_bytes = stats->n_bytes;
2793                 }
2794
2795                 *stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
2796
2797         } else if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1) {
2798                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
2799                 error->message = "stats read error";
2800
2801                 /* RED Drop packets */
2802                 reg = NIX_AF_TL1X_DROPPED_PACKETS(tm_node->hw_id);
2803                 rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
2804                 if (rc)
2805                         goto exit;
2806                 stats->leaf.n_pkts_dropped[RTE_COLOR_RED] =
2807                                                 val - tm_node->last_pkts;
2808
2809                 /* RED Drop bytes */
2810                 reg = NIX_AF_TL1X_DROPPED_BYTES(tm_node->hw_id);
2811                 rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
2812                 if (rc)
2813                         goto exit;
2814                 stats->leaf.n_bytes_dropped[RTE_COLOR_RED] =
2815                                                 val - tm_node->last_bytes;
2816
2817                 /* Clear stats */
2818                 if (clear) {
2819                         tm_node->last_pkts =
2820                                 stats->leaf.n_pkts_dropped[RTE_COLOR_RED];
2821                         tm_node->last_bytes =
2822                                 stats->leaf.n_bytes_dropped[RTE_COLOR_RED];
2823                 }
2824
2825                 *stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
2826                         RTE_TM_STATS_N_BYTES_RED_DROPPED;
2827
2828         } else {
2829                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
2830                 error->message = "unsupported node";
2831                 rc = -EINVAL;
2832         }
2833
2834 exit:
2835         return rc;
2836 }
2837
2838 const struct rte_tm_ops otx2_tm_ops = {
2839         .node_type_get = otx2_nix_tm_node_type_get,
2840
2841         .capabilities_get = otx2_nix_tm_capa_get,
2842         .level_capabilities_get = otx2_nix_tm_level_capa_get,
2843         .node_capabilities_get = otx2_nix_tm_node_capa_get,
2844
2845         .shaper_profile_add = otx2_nix_tm_shaper_profile_add,
2846         .shaper_profile_delete = otx2_nix_tm_shaper_profile_delete,
2847
2848         .node_add = otx2_nix_tm_node_add,
2849         .node_delete = otx2_nix_tm_node_delete,
2850         .node_suspend = otx2_nix_tm_node_suspend,
2851         .node_resume = otx2_nix_tm_node_resume,
2852         .hierarchy_commit = otx2_nix_tm_hierarchy_commit,
2853
2854         .node_shaper_update = otx2_nix_tm_node_shaper_update,
2855         .node_parent_update = otx2_nix_tm_node_parent_update,
2856         .node_stats_read = otx2_nix_tm_node_stats_read,
2857 };
2858
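/* Build the default hierarchy: a single chain with one node per scheduling
 * level, rooted at TL1 when the PF has TL1 access (else at TL2), and one leaf
 * per Tx queue. Internal node ids start at nb_tx_queues so they never collide
 * with leaf (SQ) ids.
 */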
2859 static int
2860 nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)
2861 {
2862         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2863         uint32_t def = eth_dev->data->nb_tx_queues;
2864         struct rte_tm_node_params params;
2865         uint32_t leaf_parent, i;
2866         int rc = 0, leaf_level;
2867
2868         /* Default params */
2869         memset(&params, 0, sizeof(params));
2870         params.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
2871
2872         if (nix_tm_have_tl1_access(dev)) {
2873                 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
2874                 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2875                                              DEFAULT_RR_WEIGHT,
2876                                              NIX_TXSCH_LVL_TL1,
2877                                              OTX2_TM_LVL_ROOT, false, &params);
2878                 if (rc)
2879                         goto exit;
2880                 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2881                                              DEFAULT_RR_WEIGHT,
2882                                              NIX_TXSCH_LVL_TL2,
2883                                              OTX2_TM_LVL_SCH1, false, &params);
2884                 if (rc)
2885                         goto exit;
2886
2887                 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2888                                              DEFAULT_RR_WEIGHT,
2889                                              NIX_TXSCH_LVL_TL3,
2890                                              OTX2_TM_LVL_SCH2, false, &params);
2891                 if (rc)
2892                         goto exit;
2893
2894                 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2895                                              DEFAULT_RR_WEIGHT,
2896                                              NIX_TXSCH_LVL_TL4,
2897                                              OTX2_TM_LVL_SCH3, false, &params);
2898                 if (rc)
2899                         goto exit;
2900
2901                 rc = nix_tm_node_add_to_list(dev, def + 4, def + 3, 0,
2902                                              DEFAULT_RR_WEIGHT,
2903                                              NIX_TXSCH_LVL_SMQ,
2904                                              OTX2_TM_LVL_SCH4, false, &params);
2905                 if (rc)
2906                         goto exit;
2907
2908                 leaf_parent = def + 4;
2909                 leaf_level = OTX2_TM_LVL_QUEUE;
2910         } else {
2911                 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
2912                 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
2913                                              DEFAULT_RR_WEIGHT,
2914                                              NIX_TXSCH_LVL_TL2,
2915                                              OTX2_TM_LVL_ROOT, false, &params);
2916                 if (rc)
2917                         goto exit;
2918
2919                 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
2920                                              DEFAULT_RR_WEIGHT,
2921                                              NIX_TXSCH_LVL_TL3,
2922                                              OTX2_TM_LVL_SCH1, false, &params);
2923                 if (rc)
2924                         goto exit;
2925
2926                 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
2927                                              DEFAULT_RR_WEIGHT,
2928                                              NIX_TXSCH_LVL_TL4,
2929                                              OTX2_TM_LVL_SCH2, false, &params);
2930                 if (rc)
2931                         goto exit;
2932
2933                 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
2934                                              DEFAULT_RR_WEIGHT,
2935                                              NIX_TXSCH_LVL_SMQ,
2936                                              OTX2_TM_LVL_SCH3, false, &params);
2937                 if (rc)
2938                         goto exit;
2939
2940                 leaf_parent = def + 3;
2941                 leaf_level = OTX2_TM_LVL_SCH4;
2942         }
2943
2944         /* Add leaf nodes */
2945         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2946                 rc = nix_tm_node_add_to_list(dev, i, leaf_parent, 0,
2947                                              DEFAULT_RR_WEIGHT,
2948                                              NIX_TXSCH_LVL_CNT,
2949                                              leaf_level, false, &params);
2950                 if (rc)
2951                         break;
2952         }
2953
2954 exit:
2955         return rc;
2956 }
2957
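/* One-time TM state init: empty node and shaper profile lists and a
 * conservative starting value for the minimum observed shaper rate.
 */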
2958 void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev)
2959 {
2960         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2961
2962         TAILQ_INIT(&dev->node_list);
2963         TAILQ_INIT(&dev->shaper_profile_list);
2964         dev->tm_rate_min = 1E9; /* 1Gbps */
2965 }
2966
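/* Install the default TM tree and back it with hardware scheduler
 * resources.  Any resources or shaper profiles left over from a previous
 * configuration are released first.
 */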
2967 int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
2968 {
2969         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2970         struct otx2_eth_dev  *dev = otx2_eth_pmd_priv(eth_dev);
2971         uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
2972         int rc;

2973
2974         /* Free up all resources already held */
2975         rc = nix_tm_free_resources(dev, 0, 0, false);
2976         if (rc) {
2977                 otx2_err("Failed to free up existing resources, rc=%d", rc);
2978                 return rc;
2979         }
2980
2981         /* Clear shaper profiles */
2982         nix_tm_clear_shaper_profiles(dev);
2983         dev->tm_flags = NIX_TM_DEFAULT_TREE;
2984
2985         /* Disable TL1 static priority when VFs are enabled,
2986          * as otherwise the VF's TL2 would need to be reallocated
2987          * at runtime to support a specific PF topology.
2988          */
2989         if (pci_dev->max_vfs)
2990                 dev->tm_flags |= NIX_TM_TL1_NO_SP;
2991
2992         rc = nix_tm_prepare_default_tree(eth_dev);
2993         if (rc != 0)
2994                 return rc;
2995
2996         rc = nix_tm_alloc_resources(eth_dev, false);
2997         if (rc != 0)
2998                 return rc;
2999         dev->tm_leaf_cnt = sq_cnt;
3000
3001         return 0;
3002 }
3003
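/* Build the rate-limit TM hierarchy: identical to the default tree at the
 * upper levels, but with one SMQ node per Tx queue so that a per-queue
 * shaper can be programmed on a queue's parent without affecting the
 * other queues.
 */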
3004 static int
3005 nix_tm_prepare_rate_limited_tree(struct rte_eth_dev *eth_dev)
3006 {
3007         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3008         uint32_t def = eth_dev->data->nb_tx_queues;
3009         struct rte_tm_node_params params;
3010         uint32_t leaf_parent, i;
             int rc = 0;
3011
3012         memset(&params, 0, sizeof(params));
3013
3014         if (nix_tm_have_tl1_access(dev)) {
3015                 dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
3016                 rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
3017                                         DEFAULT_RR_WEIGHT,
3018                                         NIX_TXSCH_LVL_TL1,
3019                                         OTX2_TM_LVL_ROOT, false, &params);
3020                 if (rc)
3021                         goto error;
3022                 rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
3023                                         DEFAULT_RR_WEIGHT,
3024                                         NIX_TXSCH_LVL_TL2,
3025                                         OTX2_TM_LVL_SCH1, false, &params);
3026                 if (rc)
3027                         goto error;
3028                 rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
3029                                         DEFAULT_RR_WEIGHT,
3030                                         NIX_TXSCH_LVL_TL3,
3031                                         OTX2_TM_LVL_SCH2, false, &params);
3032                 if (rc)
3033                         goto error;
3034                 rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
3035                                         DEFAULT_RR_WEIGHT,
3036                                         NIX_TXSCH_LVL_TL4,
3037                                         OTX2_TM_LVL_SCH3, false, &params);
3038                 if (rc)
3039                         goto error;
3040                 leaf_parent = def + 3;
3041
3042                 /* Add per queue SMQ nodes */
3043                 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
3044                         rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
3045                                                 leaf_parent,
3046                                                 0, DEFAULT_RR_WEIGHT,
3047                                                 NIX_TXSCH_LVL_SMQ,
3048                                                 OTX2_TM_LVL_SCH4,
3049                                                 false, &params);
3050                         if (rc)
3051                                 goto error;
3052                 }
3053
3054                 /* Add leaf nodes */
3055                 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
3056                         rc = nix_tm_node_add_to_list(dev, i,
3057                                                      leaf_parent + 1 + i, 0,
3058                                                      DEFAULT_RR_WEIGHT,
3059                                                      NIX_TXSCH_LVL_CNT,
3060                                                      OTX2_TM_LVL_QUEUE,
3061                                                      false, &params);
3062                         if (rc)
3063                                 goto error;
3064                 }
3065
3066                 return 0;
3067         }
3068
3069         dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
3070         rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
3071                                 DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL2,
3072                                 OTX2_TM_LVL_ROOT, false, &params);
3073         if (rc)
3074                 goto error;
3075         rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
3076                                 DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL3,
3077                                 OTX2_TM_LVL_SCH1, false, &params);
3078         if (rc)
3079                 goto error;
3080         rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
3081                                      DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL4,
3082                                      OTX2_TM_LVL_SCH2, false, &params);
3083         if (rc)
3084                 goto error;
3085         leaf_parent = def + 2;
3086
3087         /* Add per queue SMQ nodes */
3088         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
3089                 rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
3090                                              leaf_parent,
3091                                              0, DEFAULT_RR_WEIGHT,
3092                                              NIX_TXSCH_LVL_SMQ,
3093                                              OTX2_TM_LVL_SCH3,
3094                                              false, &params);
3095                 if (rc)
3096                         goto error;
3097         }
3098
3099         /* Add leaf nodes */
3100         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
3101                 rc = nix_tm_node_add_to_list(dev, i, leaf_parent + 1 + i, 0,
3102                                              DEFAULT_RR_WEIGHT,
3103                                              NIX_TXSCH_LVL_CNT,
3104                                              OTX2_TM_LVL_SCH4,
3105                                              false, &params);
3106                 if (rc)
3107                         break;
3108         }
3109 error:
3110         return rc;
3111 }
3112
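/* Program a rate limit on the MDQ level of the given node.  A rate of
 * zero asserts SW XOFF and disables the node; otherwise SW XOFF is
 * cleared if needed and only the PIR shaper is configured, with a burst
 * size of roughly 4us worth of bytes at the requested rate (never below
 * the maximum frame size).
 */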
3113 static int
3114 otx2_nix_tm_rate_limit_mdq(struct rte_eth_dev *eth_dev,
3115                            struct otx2_nix_tm_node *tm_node,
3116                            uint64_t tx_rate)
3117 {
3118         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3119         struct otx2_nix_tm_shaper_profile profile;
3120         struct otx2_mbox *mbox = dev->mbox;
3121         volatile uint64_t *reg, *regval;
3122         struct nix_txschq_config *req;
3123         uint16_t flags;
3124         uint8_t k = 0;
3125         int rc;
3126
3127         flags = tm_node->flags;
3128
3129         req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
3130         req->lvl = NIX_TXSCH_LVL_MDQ;
3131         reg = req->reg;
3132         regval = req->regval;
3133
3134         if (tx_rate == 0) {
3135                 k += prepare_tm_sw_xoff(tm_node, true, &reg[k], &regval[k]);
3136                 flags &= ~NIX_TM_NODE_ENABLED;
3137                 goto exit;
3138         }
3139
3140         if (!(flags & NIX_TM_NODE_ENABLED)) {
3141                 k += prepare_tm_sw_xoff(tm_node, false, &reg[k], &regval[k]);
3142                 flags |= NIX_TM_NODE_ENABLED;
3143         }
3144
3145         /* Use only PIR for rate limit */
3146         memset(&profile, 0, sizeof(profile));
3147         profile.params.peak.rate = tx_rate;
3148         /* Minimum burst: ~4us worth of Tx bytes at this rate */
3149         profile.params.peak.size = RTE_MAX(NIX_MAX_HW_FRS,
3150                                            (4ull * tx_rate) / (1E6 * 8));
3151         if (!dev->tm_rate_min || dev->tm_rate_min > tx_rate)
3152                 dev->tm_rate_min = tx_rate;
3153
3154         k += prepare_tm_shaper_reg(tm_node, &profile, &reg[k], &regval[k]);
3155 exit:
3156         req->num_regs = k;
3157         rc = otx2_mbox_process(mbox);
3158         if (rc)
3159                 return rc;
3160
3161         tm_node->flags = flags;
3162         return 0;
3163 }
3164
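/* Apply a per Tx queue rate limit, in Mbps.  When more than one Tx queue
 * exists and the default tree is still active, the hierarchy is first
 * replaced with the rate-limit tree, which requires the port to be
 * stopped.  The limit itself is programmed on the queue's SMQ/MDQ parent
 * via otx2_nix_tm_rate_limit_mdq().
 *
 * A minimal application-side sketch, assuming this driver op is wired to
 * the generic ethdev rate-limit API:
 *
 *   ret = rte_eth_set_queue_rate_limit(port_id, queue_idx, 1000);
 *   if (ret < 0)
 *           printf("rate limit failed: %d\n", ret);
 */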
3165 int
3166 otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
3167                                 uint16_t queue_idx, uint16_t tx_rate_mbps)
3168 {
3169         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3170         uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
3171         struct otx2_nix_tm_node *tm_node;
3172         int rc;
3173
3174         /* Check for supported revisions */
3175         if (otx2_dev_is_95xx_Ax(dev) ||
3176             otx2_dev_is_96xx_Ax(dev))
3177                 return -EINVAL;
3178
3179         if (queue_idx >= eth_dev->data->nb_tx_queues)
3180                 return -EINVAL;
3181
3182         if (!(dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
3183             !(dev->tm_flags & NIX_TM_RATE_LIMIT_TREE))
3184                 goto error;
3185
3186         if ((dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
3187             eth_dev->data->nb_tx_queues > 1) {
3188                 /* The ethdev must be stopped before the TM topology can change */
3189                 if (eth_dev->data->dev_started)
3190                         return -EBUSY;
3191
3192                 /*
3193                  * Disable xmit; it will be re-enabled once the
3194                  * new topology is in place.
3195                  */
3196                 rc = nix_xmit_disable(eth_dev);
3197                 if (rc) {
3198                         otx2_err("failed to disable TX, rc=%d", rc);
3199                         return -EIO;
3200                 }
3201
3202                 rc = nix_tm_free_resources(dev, 0, 0, false);
3203                 if (rc < 0) {
3204                         otx2_tm_dbg("failed to free default resources, rc %d",
3205                                    rc);
3206                         return -EIO;
3207                 }
3208
3209                 rc = nix_tm_prepare_rate_limited_tree(eth_dev);
3210                 if (rc < 0) {
3211                         otx2_tm_dbg("failed to prepare tm tree, rc=%d", rc);
3212                         return rc;
3213                 }
3214
3215                 rc = nix_tm_alloc_resources(eth_dev, true);
3216                 if (rc != 0) {
3217                         otx2_tm_dbg("failed to allocate tm tree, rc=%d", rc);
3218                         return rc;
3219                 }
3220
3221                 dev->tm_flags &= ~NIX_TM_DEFAULT_TREE;
3222                 dev->tm_flags |= NIX_TM_RATE_LIMIT_TREE;
3223         }
3224
3225         tm_node = nix_tm_node_search(dev, queue_idx, false);
3226
3227         /* Check if we found a valid leaf node */
3228         if (!tm_node ||
3229             !nix_tm_is_leaf(dev, tm_node->lvl) ||
3230             !tm_node->parent ||
3231             tm_node->parent->hw_id == UINT32_MAX)
3232                 return -EIO;
3233
3234         return otx2_nix_tm_rate_limit_mdq(eth_dev, tm_node->parent, tx_rate);
3235 error:
3236         otx2_tm_dbg("Unsupported TM tree 0x%x", dev->tm_flags);
3237         return -EINVAL;
3238 }
3239
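/* Expose the rte_tm ops table above, but only on silicon revisions where
 * traffic management is supported (95xx/96xx Ax steppings are excluded).
 */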
3240 int
3241 otx2_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *arg)
3242 {
3243         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3244
3245         if (!arg)
3246                 return -EINVAL;
3247
3248         /* Check for supported revisions */
3249         if (otx2_dev_is_95xx_Ax(dev) ||
3250             otx2_dev_is_96xx_Ax(dev))
3251                 return -EINVAL;
3252
3253         *(const void **)arg = &otx2_tm_ops;
3254
3255         return 0;
3256 }
3257
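/* Teardown counterpart of otx2_nix_tm_init_default(): release all TM
 * hardware resources and shaper profiles.  Transmit is expected to have
 * been disabled already by the caller.
 */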
3258 int
3259 otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
3260 {
3261         struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
3262         int rc;
3263
3264         /* Xmit is assumed to be disabled */
3265         /* Free up resources already held */
3266         rc = nix_tm_free_resources(dev, 0, 0, false);
3267         if (rc) {
3268                 otx2_err("Failed to free up existing resources, rc=%d", rc);
3269                 return rc;
3270         }
3271
3272         /* Clear shaper profiles */
3273         nix_tm_clear_shaper_profiles(dev);
3274
3275         dev->tm_flags = 0;
3276         return 0;
3277 }
3278
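/* Map a send queue to the SMQ id and RR quantum of its parent in the
 * committed hierarchy, and clear SW XOFF on that SMQ so the queue can
 * transmit.
 */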
3279 int
3280 otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
3281                           uint32_t *rr_quantum, uint16_t *smq)
3282 {
3283         struct otx2_nix_tm_node *tm_node;
3284         int rc;
3285
3286         /* 0..sq_cnt-1 are leaf nodes */
3287         if (sq >= dev->tm_leaf_cnt)
3288                 return -EINVAL;
3289
3290         /* Search for internal node first */
3291         tm_node = nix_tm_node_search(dev, sq, false);
3292         if (!tm_node)
3293                 tm_node = nix_tm_node_search(dev, sq, true);
3294
3295         /* Check if we found a valid leaf node */
3296         if (!tm_node || !nix_tm_is_leaf(dev, tm_node->lvl) ||
3297             !tm_node->parent || tm_node->parent->hw_id == UINT32_MAX) {
3298                 return -EIO;
3299         }
3300
3301         /* Get SMQ Id of leaf node's parent */
3302         *smq = tm_node->parent->hw_id;
3303         *rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
3304
3305         rc = nix_smq_xoff(dev, tm_node->parent, false);
3306         if (rc)
3307                 return rc;
3308         tm_node->flags |= NIX_TM_NODE_ENABLED;
3309
3310         return 0;
3311 }