/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_malloc.h>

#include "otx2_ethdev.h"
#include "otx2_tm.h"

/* Use last LVL_CNT nodes as default nodes */
#define NIX_DEFAULT_NODE_ID_START (RTE_TM_NODE_ID_NULL - NIX_TXSCH_LVL_CNT)

enum otx2_tm_node_level {
        OTX2_TM_LVL_ROOT = 0,
        OTX2_TM_LVL_SCH1,
        OTX2_TM_LVL_SCH2,
        OTX2_TM_LVL_SCH3,
        OTX2_TM_LVL_SCH4,
        OTX2_TM_LVL_QUEUE,
        OTX2_TM_LVL_MAX,
};

static inline
uint64_t shaper2regval(struct shaper_params *shaper)
{
        return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
                (shaper->div_exp << 13) | (shaper->exponent << 9) |
                (shaper->mantissa << 1);
}
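
/* Reading aid (derived from the packing above, not from the HRM): the
 * shaper register value carries burst_exponent at bit 37, burst_mantissa
 * at bit 29, div_exp at bit 13, exponent at bit 9 and mantissa at bit 1.
 * Bit 0 is left clear here; callers OR in 1 to enable the shaper.
 */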

static int
nix_get_link(struct otx2_eth_dev *dev)
{
        int link = 13 /* SDP */;
        uint16_t lmac_chan;
        uint16_t map;

        lmac_chan = dev->tx_chan_base;

        /* CGX lmac link */
        if (lmac_chan >= 0x800) {
                map = lmac_chan & 0x7FF;
                link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
        } else if (lmac_chan < 0x700) {
                /* LBK channel */
                link = 12;
        }

        return link;
}
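
/* Worked example for the CGX decode above (illustrative value, not from
 * the source): lmac_chan = 0x910 gives map = 0x110, so
 * link = 4 * 1 + 1 = 5.
 */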

static uint8_t
nix_get_relchan(struct otx2_eth_dev *dev)
{
        return dev->tx_chan_base & 0xff;
}

static bool
nix_tm_have_tl1_access(struct otx2_eth_dev *dev)
{
        bool is_lbk = otx2_dev_is_lbk(dev);
        return otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) &&
                !is_lbk && !dev->maxvf;
}

static int
find_prio_anchor(struct otx2_eth_dev *dev, uint32_t node_id)
{
        struct otx2_nix_tm_node *child_node;

        TAILQ_FOREACH(child_node, &dev->node_list, node) {
                if (!child_node->parent)
                        continue;
                if (child_node->parent->id != node_id)
                        continue;
                if (child_node->priority == child_node->parent->rr_prio)
                        continue;
                return child_node->hw_id - child_node->priority;
        }
        return 0;
}
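
/* Note on the anchor above (a hedged reading of the code): returning
 * hw_id - priority yields the hw id a priority-0 sibling would hold,
 * which assumes children of one priority group receive contiguous hw
 * ids; populate_tm_reg() feeds this into the prio anchor bits of the
 * TOPOLOGY register.
 */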


static struct otx2_nix_tm_shaper_profile *
nix_tm_shaper_profile_search(struct otx2_eth_dev *dev, uint32_t shaper_id)
{
        struct otx2_nix_tm_shaper_profile *tm_shaper_profile;

        TAILQ_FOREACH(tm_shaper_profile, &dev->shaper_profile_list, shaper) {
                if (tm_shaper_profile->shaper_profile_id == shaper_id)
                        return tm_shaper_profile;
        }
        return NULL;
}

static inline uint64_t
shaper_rate_to_nix(uint64_t value, uint64_t *exponent_p,
                   uint64_t *mantissa_p, uint64_t *div_exp_p)
{
        uint64_t div_exp, exponent, mantissa;

        /* Boundary checks */
        if (value < MIN_SHAPER_RATE ||
            value > MAX_SHAPER_RATE)
                return 0;

        if (value <= SHAPER_RATE(0, 0, 0)) {
                /* Calculate rate div_exp and mantissa using
                 * the following formula:
                 *
                 * value = (2E6 * (256 + mantissa)
                 *              / ((1 << div_exp) * 256))
                 */
                div_exp = 0;
                exponent = 0;
                mantissa = MAX_RATE_MANTISSA;

                while (value < (NIX_SHAPER_RATE_CONST / (1 << div_exp)))
                        div_exp += 1;

                while (value <
                       ((NIX_SHAPER_RATE_CONST * (256 + mantissa)) /
                        ((1 << div_exp) * 256)))
                        mantissa -= 1;
        } else {
                /* Calculate rate exponent and mantissa using
                 * the following formula:
                 *
                 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
                 */
                div_exp = 0;
                exponent = MAX_RATE_EXPONENT;
                mantissa = MAX_RATE_MANTISSA;

                while (value < (NIX_SHAPER_RATE_CONST * (1 << exponent)))
                        exponent -= 1;

                while (value < ((NIX_SHAPER_RATE_CONST *
                                ((256 + mantissa) << exponent)) / 256))
                        mantissa -= 1;
        }

        if (div_exp > MAX_RATE_DIV_EXP ||
            exponent > MAX_RATE_EXPONENT || mantissa > MAX_RATE_MANTISSA)
                return 0;

        if (div_exp_p)
                *div_exp_p = div_exp;
        if (exponent_p)
                *exponent_p = exponent;
        if (mantissa_p)
                *mantissa_p = mantissa;

        /* Calculate real rate value */
        return SHAPER_RATE(exponent, mantissa, div_exp);
}
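
/* Worked example for the exponent branch above (follows the comment
 * formula with NIX_SHAPER_RATE_CONST taken as 2E6): a requested rate of
 * 8E6 settles at exponent = 2, mantissa = 0, div_exp = 0, since
 * (2E6 * ((256 + 0) << 2)) / 256 == 8E6 exactly.
 */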

static inline uint64_t
shaper_burst_to_nix(uint64_t value, uint64_t *exponent_p,
                    uint64_t *mantissa_p)
{
        uint64_t exponent, mantissa;

        if (value < MIN_SHAPER_BURST || value > MAX_SHAPER_BURST)
                return 0;

        /* Calculate burst exponent and mantissa using
         * the following formula:
         *
         * value = (((256 + mantissa) << (exponent + 1)) / 256)
         */
        exponent = MAX_BURST_EXPONENT;
        mantissa = MAX_BURST_MANTISSA;

        while (value < (1ull << (exponent + 1)))
                exponent -= 1;

        while (value < ((256 + mantissa) << (exponent + 1)) / 256)
                mantissa -= 1;

        if (exponent > MAX_BURST_EXPONENT || mantissa > MAX_BURST_MANTISSA)
                return 0;

        if (exponent_p)
                *exponent_p = exponent;
        if (mantissa_p)
                *mantissa_p = mantissa;

        return SHAPER_BURST(exponent, mantissa);
}
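
/* Worked example for the burst fit above (illustrative value): a burst
 * of 512B settles at exponent = 8, mantissa = 0, because
 * ((256 + 0) << (8 + 1)) / 256 == 512 exactly.
 */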

static void
shaper_config_to_nix(struct otx2_nix_tm_shaper_profile *profile,
                     struct shaper_params *cir,
                     struct shaper_params *pir)
{
        struct rte_tm_shaper_params *param;

        /* Check for NULL before dereferencing the profile */
        if (!profile)
                return;

        param = &profile->params;

        /* Calculate CIR exponent and mantissa */
        if (param->committed.rate)
                cir->rate = shaper_rate_to_nix(param->committed.rate,
                                               &cir->exponent,
                                               &cir->mantissa,
                                               &cir->div_exp);

        /* Calculate PIR exponent and mantissa */
        if (param->peak.rate)
                pir->rate = shaper_rate_to_nix(param->peak.rate,
                                               &pir->exponent,
                                               &pir->mantissa,
                                               &pir->div_exp);

        /* Calculate CIR burst exponent and mantissa */
        if (param->committed.size)
                cir->burst = shaper_burst_to_nix(param->committed.size,
                                                 &cir->burst_exponent,
                                                 &cir->burst_mantissa);

        /* Calculate PIR burst exponent and mantissa */
        if (param->peak.size)
                pir->burst = shaper_burst_to_nix(param->peak.size,
                                                 &pir->burst_exponent,
                                                 &pir->burst_mantissa);
}

static int
populate_tm_tl1_default(struct otx2_eth_dev *dev, uint32_t schq)
{
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_txschq_config *req;

        /*
         * Default config for TL1.
         * For VF this is always ignored.
         */

        req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
        req->lvl = NIX_TXSCH_LVL_TL1;

        /* Set DWRR quantum */
        req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
        req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
        req->num_regs++;

        req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
        req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
        req->num_regs++;

        req->reg[2] = NIX_AF_TL1X_CIR(schq);
        req->regval[2] = 0;
        req->num_regs++;

        return otx2_mbox_process(mbox);
}

static uint8_t
prepare_tm_sched_reg(struct otx2_eth_dev *dev,
                     struct otx2_nix_tm_node *tm_node,
                     volatile uint64_t *reg, volatile uint64_t *regval)
{
        uint64_t strict_prio = tm_node->priority;
        uint32_t hw_lvl = tm_node->hw_lvl;
        uint32_t schq = tm_node->hw_id;
        uint64_t rr_quantum;
        uint8_t k = 0;

        rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);

        /* For children of the root, strict priority is the default if
         * either the device root is TL2 or TL1 Static Priority is
         * disabled.
         */
        if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
            (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
             dev->tm_flags & NIX_TM_TL1_NO_SP))
                strict_prio = TXSCH_TL1_DFLT_RR_PRIO;

        otx2_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
                    "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
                    nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
                    tm_node->id, strict_prio, rr_quantum, tm_node);

        switch (hw_lvl) {
        case NIX_TXSCH_LVL_SMQ:
                reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
                regval[k] = (strict_prio << 24) | rr_quantum;
                k++;

                break;
        case NIX_TXSCH_LVL_TL4:
                reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
                regval[k] = (strict_prio << 24) | rr_quantum;
                k++;

                break;
        case NIX_TXSCH_LVL_TL3:
                reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
                regval[k] = (strict_prio << 24) | rr_quantum;
                k++;

                break;
        case NIX_TXSCH_LVL_TL2:
                reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
                regval[k] = (strict_prio << 24) | rr_quantum;
                k++;

                break;
        case NIX_TXSCH_LVL_TL1:
                reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
                regval[k] = rr_quantum;
                k++;

                break;
        }

        return k;
}

static uint8_t
prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
                      struct otx2_nix_tm_shaper_profile *profile,
                      volatile uint64_t *reg, volatile uint64_t *regval)
{
        struct shaper_params cir, pir;
        uint32_t schq = tm_node->hw_id;
        uint8_t k = 0;

        memset(&cir, 0, sizeof(cir));
        memset(&pir, 0, sizeof(pir));
        shaper_config_to_nix(profile, &cir, &pir);

        otx2_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
                    "pir %" PRIu64 "(%" PRIu64 "B),"
                    " cir %" PRIu64 "(%" PRIu64 "B) (%p)",
                    nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
                    tm_node->id, pir.rate, pir.burst,
                    cir.rate, cir.burst, tm_node);

        switch (tm_node->hw_lvl) {
        case NIX_TXSCH_LVL_SMQ:
                /* Configure PIR, CIR */
                reg[k] = NIX_AF_MDQX_PIR(schq);
                regval[k] = (pir.rate && pir.burst) ?
                                (shaper2regval(&pir) | 1) : 0;
                k++;

                reg[k] = NIX_AF_MDQX_CIR(schq);
                regval[k] = (cir.rate && cir.burst) ?
                                (shaper2regval(&cir) | 1) : 0;
                k++;

                /* Configure RED ALG */
                reg[k] = NIX_AF_MDQX_SHAPE(schq);
                regval[k] = ((uint64_t)tm_node->red_algo << 9);
                k++;
                break;
        case NIX_TXSCH_LVL_TL4:
                /* Configure PIR, CIR */
                reg[k] = NIX_AF_TL4X_PIR(schq);
                regval[k] = (pir.rate && pir.burst) ?
                                (shaper2regval(&pir) | 1) : 0;
                k++;

                reg[k] = NIX_AF_TL4X_CIR(schq);
                regval[k] = (cir.rate && cir.burst) ?
                                (shaper2regval(&cir) | 1) : 0;
                k++;

                /* Configure RED algo */
                reg[k] = NIX_AF_TL4X_SHAPE(schq);
                regval[k] = ((uint64_t)tm_node->red_algo << 9);
                k++;
                break;
        case NIX_TXSCH_LVL_TL3:
                /* Configure PIR, CIR */
                reg[k] = NIX_AF_TL3X_PIR(schq);
                regval[k] = (pir.rate && pir.burst) ?
                                (shaper2regval(&pir) | 1) : 0;
                k++;

                reg[k] = NIX_AF_TL3X_CIR(schq);
                regval[k] = (cir.rate && cir.burst) ?
                                (shaper2regval(&cir) | 1) : 0;
                k++;

                /* Configure RED algo */
                reg[k] = NIX_AF_TL3X_SHAPE(schq);
                regval[k] = ((uint64_t)tm_node->red_algo << 9);
                k++;

                break;
        case NIX_TXSCH_LVL_TL2:
                /* Configure PIR, CIR */
                reg[k] = NIX_AF_TL2X_PIR(schq);
                regval[k] = (pir.rate && pir.burst) ?
                                (shaper2regval(&pir) | 1) : 0;
                k++;

                reg[k] = NIX_AF_TL2X_CIR(schq);
                regval[k] = (cir.rate && cir.burst) ?
                                (shaper2regval(&cir) | 1) : 0;
                k++;

                /* Configure RED algo */
                reg[k] = NIX_AF_TL2X_SHAPE(schq);
                regval[k] = ((uint64_t)tm_node->red_algo << 9);
                k++;

                break;
        case NIX_TXSCH_LVL_TL1:
                /* Configure CIR */
                reg[k] = NIX_AF_TL1X_CIR(schq);
                regval[k] = (cir.rate && cir.burst) ?
                                (shaper2regval(&cir) | 1) : 0;
                k++;
                break;
        }

        return k;
}

static int
populate_tm_reg(struct otx2_eth_dev *dev,
                struct otx2_nix_tm_node *tm_node)
{
        struct otx2_nix_tm_shaper_profile *profile;
        uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
        uint64_t regval[MAX_REGS_PER_MBOX_MSG];
        uint64_t reg[MAX_REGS_PER_MBOX_MSG];
        struct otx2_mbox *mbox = dev->mbox;
        uint64_t parent = 0, child = 0;
        uint32_t hw_lvl, rr_prio, schq;
        struct nix_txschq_config *req;
        int rc = -EFAULT;
        uint8_t k = 0;

        memset(regval_mask, 0, sizeof(regval_mask));
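        /* Hedged reading of the mbox mask semantics: the AF applies
         * new = (old & regval_mask) | regval, so the all-zero default
         * set here makes each entry a full overwrite, while the SMQ CFG
         * entry below uses ~BIT_ULL(50) to touch only the xoff bit.
         */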
        profile = nix_tm_shaper_profile_search(dev,
                                        tm_node->params.shaper_profile_id);
        rr_prio = tm_node->rr_prio;
        hw_lvl = tm_node->hw_lvl;
        schq = tm_node->hw_id;

        /* Root node will not have a parent node */
        if (hw_lvl == dev->otx2_tm_root_lvl)
                parent = tm_node->parent_hw_id;
        else
                parent = tm_node->parent->hw_id;

        /* The TL2 root still needs its TL1 parent configured with defaults */
        if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
            hw_lvl == dev->otx2_tm_root_lvl) {
                rc = populate_tm_tl1_default(dev, parent);
                if (rc)
                        goto error;
        }

        if (hw_lvl != NIX_TXSCH_LVL_SMQ)
                child = find_prio_anchor(dev, tm_node->id);

        /* Override default rr_prio when TL1
         * Static Priority is disabled
         */
        if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
            dev->tm_flags & NIX_TM_TL1_NO_SP) {
                rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
                child = 0;
        }

        otx2_tm_dbg("Topology config node %s(%u)->%s(%"PRIu64") lvl %u, id %u"
                    " prio_anchor %"PRIu64" rr_prio %u (%p)",
                    nix_hwlvl2str(hw_lvl), schq, nix_hwlvl2str(hw_lvl + 1),
                    parent, tm_node->lvl, tm_node->id, child, rr_prio, tm_node);

        /* Prepare Topology and Link config */
        switch (hw_lvl) {
        case NIX_TXSCH_LVL_SMQ:

                /* Set xoff which will be cleared later */
                reg[k] = NIX_AF_SMQX_CFG(schq);
                regval[k] = BIT_ULL(50);
                regval_mask[k] = ~BIT_ULL(50);
                k++;

                /* Parent and schedule conf */
                reg[k] = NIX_AF_MDQX_PARENT(schq);
                regval[k] = parent << 16;
                k++;

                break;
        case NIX_TXSCH_LVL_TL4:
                /* Parent and schedule conf */
                reg[k] = NIX_AF_TL4X_PARENT(schq);
                regval[k] = parent << 16;
                k++;

                reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
                regval[k] = (child << 32) | (rr_prio << 1);
                k++;

                /* Configure TL4 to send to SDP channel instead of CGX/LBK */
                if (otx2_dev_is_sdp(dev)) {
                        reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
                        regval[k] = BIT_ULL(12);
                        k++;
                }
                break;
        case NIX_TXSCH_LVL_TL3:
                /* Parent and schedule conf */
                reg[k] = NIX_AF_TL3X_PARENT(schq);
                regval[k] = parent << 16;
                k++;

                reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
                regval[k] = (child << 32) | (rr_prio << 1);
                k++;

                /* Link configuration */
                if (!otx2_dev_is_sdp(dev) &&
                    dev->link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
                        reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
                                                nix_get_link(dev));
                        regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
                        k++;
                }

                break;
        case NIX_TXSCH_LVL_TL2:
                /* Parent and schedule conf */
                reg[k] = NIX_AF_TL2X_PARENT(schq);
                regval[k] = parent << 16;
                k++;

                reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
                regval[k] = (child << 32) | (rr_prio << 1);
                k++;

                /* Link configuration */
                if (!otx2_dev_is_sdp(dev) &&
                    dev->link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
                        reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
                                                nix_get_link(dev));
                        regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
                        k++;
                }

                break;
        case NIX_TXSCH_LVL_TL1:
                reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
                regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
                k++;

                break;
        }

        /* Prepare schedule config */
        k += prepare_tm_sched_reg(dev, tm_node, &reg[k], &regval[k]);

        /* Prepare shaping config */
        k += prepare_tm_shaper_reg(tm_node, profile, &reg[k], &regval[k]);

        if (!k)
                return 0;

        /* Copy and send config mbox */
        req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
        req->lvl = hw_lvl;
        req->num_regs = k;

        otx2_mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
        otx2_mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
        otx2_mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);

        rc = otx2_mbox_process(mbox);
        if (rc)
                goto error;

        return 0;
error:
        otx2_err("Txschq cfg request failed for node %p, rc=%d", tm_node, rc);
        return rc;
}


static int
nix_tm_txsch_reg_config(struct otx2_eth_dev *dev)
{
        struct otx2_nix_tm_node *tm_node;
        uint32_t hw_lvl;
        int rc = 0;

        for (hw_lvl = 0; hw_lvl <= dev->otx2_tm_root_lvl; hw_lvl++) {
                TAILQ_FOREACH(tm_node, &dev->node_list, node) {
                        if (tm_node->hw_lvl == hw_lvl &&
                            tm_node->hw_lvl != NIX_TXSCH_LVL_CNT) {
                                rc = populate_tm_reg(dev, tm_node);
                                if (rc)
                                        goto exit;
                        }
                }
        }
exit:
        return rc;
}

static struct otx2_nix_tm_node *
nix_tm_node_search(struct otx2_eth_dev *dev,
                   uint32_t node_id, bool user)
{
        struct otx2_nix_tm_node *tm_node;

        TAILQ_FOREACH(tm_node, &dev->node_list, node) {
                if (tm_node->id == node_id &&
                    (user == !!(tm_node->flags & NIX_TM_NODE_USER)))
                        return tm_node;
        }
        return NULL;
}

static uint32_t
check_rr(struct otx2_eth_dev *dev, uint32_t priority, uint32_t parent_id)
{
        struct otx2_nix_tm_node *tm_node;
        uint32_t rr_num = 0;

        TAILQ_FOREACH(tm_node, &dev->node_list, node) {
                if (!tm_node->parent)
                        continue;

                if (tm_node->parent->id != parent_id)
                        continue;

                if (tm_node->priority == priority)
                        rr_num++;
        }
        return rr_num;
}

static int
nix_tm_update_parent_info(struct otx2_eth_dev *dev)
{
        struct otx2_nix_tm_node *tm_node_child;
        struct otx2_nix_tm_node *tm_node;
        struct otx2_nix_tm_node *parent;
        uint32_t rr_num = 0;
        uint32_t priority;

        TAILQ_FOREACH(tm_node, &dev->node_list, node) {
                if (!tm_node->parent)
                        continue;
                /* Count the group of children with the same priority,
                 * i.e., those scheduled round-robin.
                 */
                parent = tm_node->parent;
                priority = tm_node->priority;
                rr_num = check_rr(dev, priority, parent->id);

                /* Assuming that multiple RR groups are
                 * not configured based on capability.
                 */
                if (rr_num > 1) {
                        parent->rr_prio = priority;
                        parent->rr_num = rr_num;
                }

                /* Find out static priority children that are not in RR */
                TAILQ_FOREACH(tm_node_child, &dev->node_list, node) {
                        if (!tm_node_child->parent)
                                continue;
                        if (parent->id != tm_node_child->parent->id)
                                continue;
                        if (parent->max_prio == UINT32_MAX &&
                            tm_node_child->priority != parent->rr_prio)
                                parent->max_prio = 0;

                        if (parent->max_prio < tm_node_child->priority &&
                            parent->rr_prio != tm_node_child->priority)
                                parent->max_prio = tm_node_child->priority;
                }
        }

        return 0;
}
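
/* Worked example for the pass above (illustrative topology, not from
 * the source): a parent with children at priorities {0, 1, 1, 2} ends
 * up with rr_prio = 1 and rr_num = 2 (the two priority-1 children form
 * the RR group), while max_prio settles at 2 from the remaining static
 * priority children.
 */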

static int
nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,
                        uint32_t parent_node_id, uint32_t priority,
                        uint32_t weight, uint16_t hw_lvl,
                        uint16_t lvl, bool user,
                        struct rte_tm_node_params *params)
{
        struct otx2_nix_tm_shaper_profile *shaper_profile;
        struct otx2_nix_tm_node *tm_node, *parent_node;
        uint32_t shaper_profile_id;

        shaper_profile_id = params->shaper_profile_id;
        shaper_profile = nix_tm_shaper_profile_search(dev, shaper_profile_id);

        parent_node = nix_tm_node_search(dev, parent_node_id, user);

        tm_node = rte_zmalloc("otx2_nix_tm_node",
                              sizeof(struct otx2_nix_tm_node), 0);
        if (!tm_node)
                return -ENOMEM;

        tm_node->lvl = lvl;
        tm_node->hw_lvl = hw_lvl;

        tm_node->id = node_id;
        tm_node->priority = priority;
        tm_node->weight = weight;
        tm_node->rr_prio = 0xf;
        tm_node->max_prio = UINT32_MAX;
        tm_node->hw_id = UINT32_MAX;
        tm_node->flags = 0;
        if (user)
                tm_node->flags = NIX_TM_NODE_USER;
        rte_memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));

        if (shaper_profile)
                shaper_profile->reference_count++;
        tm_node->parent = parent_node;
        tm_node->parent_hw_id = UINT32_MAX;

        TAILQ_INSERT_TAIL(&dev->node_list, tm_node, node);

        return 0;
}

static int
nix_tm_clear_shaper_profiles(struct otx2_eth_dev *dev)
{
        struct otx2_nix_tm_shaper_profile *shaper_profile;

        while ((shaper_profile = TAILQ_FIRST(&dev->shaper_profile_list))) {
                if (shaper_profile->reference_count)
                        otx2_tm_dbg("Shaper profile %u has non zero references",
                                    shaper_profile->shaper_profile_id);
                TAILQ_REMOVE(&dev->shaper_profile_list, shaper_profile, shaper);
                rte_free(shaper_profile);
        }

        return 0;
}

static int
nix_smq_xoff(struct otx2_eth_dev *dev, uint16_t smq, bool enable)
{
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_txschq_config *req;

        req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
        req->lvl = NIX_TXSCH_LVL_SMQ;
        req->num_regs = 1;

        req->reg[0] = NIX_AF_SMQX_CFG(smq);
        /* Unmodified fields */
        req->regval[0] = ((uint64_t)NIX_MAX_VTAG_INS << 36) |
                                (NIX_MAX_HW_FRS << 8) | NIX_MIN_HW_FRS;

        /* Bits 49 and 50 stay clear when disabling xoff */
        if (enable)
                req->regval[0] |= BIT_ULL(50) | BIT_ULL(49);

        return otx2_mbox_process(mbox);
}

int
otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable)
{
        struct otx2_eth_txq *txq = __txq;
        struct npa_aq_enq_req *req;
        struct npa_aq_enq_rsp *rsp;
        struct otx2_npa_lf *lf;
        struct otx2_mbox *mbox;
        uint64_t aura_handle;
        int rc;

        lf = otx2_npa_lf_obj_get();
        if (!lf)
                return -EFAULT;
        mbox = lf->mbox;
        /* Set/clear sqb aura fc_ena */
        aura_handle = txq->sqb_pool->pool_id;
        req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);

        req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
        req->ctype = NPA_AQ_CTYPE_AURA;
        req->op = NPA_AQ_INSTOP_WRITE;
        /* Not needed for aura writes, but the AF driver expects it and
         * will translate it to the associated pool context.
         */
        req->aura.pool_addr = req->aura_id;

        req->aura.fc_ena = enable;
        req->aura_mask.fc_ena = 1;

        rc = otx2_mbox_process(mbox);
        if (rc)
                return rc;

        /* Read back npa aura ctx */
        req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);

        req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
        req->ctype = NPA_AQ_CTYPE_AURA;
        req->op = NPA_AQ_INSTOP_READ;

        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
        if (rc)
                return rc;

        /* Init when enabled as there might be no triggers */
        if (enable)
                *(volatile uint64_t *)txq->fc_mem = rsp->aura.count;
        else
                *(volatile uint64_t *)txq->fc_mem = txq->nb_sqb_bufs;
        /* Sync write barrier */
        rte_wmb();

        return 0;
}

static void
nix_txq_flush_sq_spin(struct otx2_eth_txq *txq)
{
        uint16_t sqb_cnt, head_off, tail_off;
        struct otx2_eth_dev *dev = txq->dev;
        uint16_t sq = txq->sq;
        uint64_t reg, val;
        int64_t *regaddr;

        while (true) {
                reg = ((uint64_t)sq << 32);
                regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);
                val = otx2_atomic64_add_nosync(reg, regaddr);

                regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS);
                val = otx2_atomic64_add_nosync(reg, regaddr);
                sqb_cnt = val & 0xFFFF;
                head_off = (val >> 20) & 0x3F;
                tail_off = (val >> 28) & 0x3F;

                /* SQ reached quiescent state */
                if (sqb_cnt <= 1 && head_off == tail_off &&
                    (*txq->fc_mem == txq->nb_sqb_bufs)) {
                        break;
                }

                rte_pause();
        }
}
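
/* Quiescence above (a hedged reading of the poll): at most one SQB
 * still in use, head and tail offsets equal, and the flow-control count
 * back at nb_sqb_bufs, i.e. every SQ buffer has been returned.
 */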

int
otx2_nix_tm_sw_xoff(void *__txq, bool dev_started)
{
        struct otx2_eth_txq *txq = __txq;
        struct otx2_eth_dev *dev = txq->dev;
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_aq_enq_req *req;
        struct nix_aq_enq_rsp *rsp;
        uint16_t smq;
        int rc;

        /* Get smq from sq */
        req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
        req->qidx = txq->sq;
        req->ctype = NIX_AQ_CTYPE_SQ;
        req->op = NIX_AQ_INSTOP_READ;
        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
        if (rc) {
                otx2_err("Failed to get smq, rc=%d", rc);
                return -EIO;
        }

        /* Check if sq is enabled */
        if (!rsp->sq.ena)
                return 0;

        smq = rsp->sq.smq;

        /* Enable CGX RXTX to drain pkts */
        if (!dev_started) {
                rc = otx2_cgx_rxtx_start(dev);
                if (rc)
                        return rc;
        }

        rc = otx2_nix_sq_sqb_aura_fc(txq, false);
        if (rc < 0) {
                otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
                goto cleanup;
        }

        /* Disable smq xoff in case it was enabled earlier */
        rc = nix_smq_xoff(dev, smq, false);
        if (rc) {
                otx2_err("Failed to enable smq for sq %u, rc=%d", txq->sq, rc);
                goto cleanup;
        }

        /* Wait for sq entries to be flushed */
        nix_txq_flush_sq_spin(txq);

        /* Flush and enable smq xoff */
        rc = nix_smq_xoff(dev, smq, true);
        if (rc) {
                otx2_err("Failed to disable smq for sq %u, rc=%d", txq->sq, rc);
                return rc;
        }

cleanup:
        /* Restore cgx state */
        if (!dev_started)
                rc |= otx2_cgx_rxtx_stop(dev);

        return rc;
}

static int
nix_tm_sw_xon(struct otx2_eth_txq *txq,
              uint16_t smq, uint32_t rr_quantum)
{
        struct otx2_eth_dev *dev = txq->dev;
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_aq_enq_req *req;
        int rc;

        otx2_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum %u",
                    txq->sq, smq, rr_quantum);
        /* Set smq from sq */
        req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
        req->qidx = txq->sq;
        req->ctype = NIX_AQ_CTYPE_SQ;
        req->op = NIX_AQ_INSTOP_WRITE;
        req->sq.smq = smq;
        req->sq.smq_rr_quantum = rr_quantum;
        req->sq_mask.smq = ~req->sq_mask.smq;
        req->sq_mask.smq_rr_quantum = ~req->sq_mask.smq_rr_quantum;

        rc = otx2_mbox_process(mbox);
        if (rc) {
                otx2_err("Failed to set smq, rc=%d", rc);
                return -EIO;
        }

        /* Enable sqb_aura fc */
        rc = otx2_nix_sq_sqb_aura_fc(txq, true);
        if (rc < 0) {
                otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
                return rc;
        }

        /* Disable smq xoff */
        rc = nix_smq_xoff(dev, smq, false);
        if (rc) {
                otx2_err("Failed to enable smq for sq %u", txq->sq);
                return rc;
        }

        return 0;
}

static int
nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask,
                      uint32_t flags, bool hw_only)
{
        struct otx2_nix_tm_shaper_profile *shaper_profile;
        struct otx2_nix_tm_node *tm_node, *next_node;
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_txsch_free_req *req;
        uint32_t shaper_profile_id;
        bool skip_node = false;
        int rc = 0;

        next_node = TAILQ_FIRST(&dev->node_list);
        while (next_node) {
                tm_node = next_node;
                next_node = TAILQ_NEXT(tm_node, node);

                /* Check for only requested nodes */
                if ((tm_node->flags & flags_mask) != flags)
                        continue;

                if (nix_tm_have_tl1_access(dev) &&
                    tm_node->hw_lvl == NIX_TXSCH_LVL_TL1)
                        skip_node = true;

                otx2_tm_dbg("Free hwres for node %u, hwlvl %u, hw_id %u (%p)",
                            tm_node->id, tm_node->hw_lvl,
                            tm_node->hw_id, tm_node);
                /* Free specific HW resource if requested */
                if (!skip_node && flags_mask &&
                    tm_node->flags & NIX_TM_NODE_HWRES) {
                        req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
                        req->flags = 0;
                        req->schq_lvl = tm_node->hw_lvl;
                        req->schq = tm_node->hw_id;
                        rc = otx2_mbox_process(mbox);
                        if (rc)
                                break;
                } else {
                        skip_node = false;
                }
                tm_node->flags &= ~NIX_TM_NODE_HWRES;

                /* Leave software elements if needed */
                if (hw_only)
                        continue;

                shaper_profile_id = tm_node->params.shaper_profile_id;
                shaper_profile =
                        nix_tm_shaper_profile_search(dev, shaper_profile_id);
                if (shaper_profile)
                        shaper_profile->reference_count--;

                TAILQ_REMOVE(&dev->node_list, tm_node, node);
                rte_free(tm_node);
        }

        if (!flags_mask) {
                /* Free all hw resources */
                req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
                req->flags = TXSCHQ_FREE_ALL;

                return otx2_mbox_process(mbox);
        }

        return rc;
}

static uint8_t
nix_tm_copy_rsp_to_dev(struct otx2_eth_dev *dev,
                       struct nix_txsch_alloc_rsp *rsp)
{
        uint16_t schq;
        uint8_t lvl;

        for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
                for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) {
                        dev->txschq_list[lvl][schq] = rsp->schq_list[lvl][schq];
                        dev->txschq_contig_list[lvl][schq] =
                                rsp->schq_contig_list[lvl][schq];
                }

                dev->txschq[lvl] = rsp->schq[lvl];
                dev->txschq_contig[lvl] = rsp->schq_contig[lvl];
        }
        return 0;
}

static int
nix_tm_assign_id_to_node(struct otx2_eth_dev *dev,
                         struct otx2_nix_tm_node *child,
                         struct otx2_nix_tm_node *parent)
{
        uint32_t hw_id, schq_con_index, prio_offset;
        uint32_t l_id, schq_index;

        otx2_tm_dbg("Assign hw id for child node %u, lvl %u, hw_lvl %u (%p)",
                    child->id, child->lvl, child->hw_lvl, child);

        child->flags |= NIX_TM_NODE_HWRES;

        /* Process root nodes */
        if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
            child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
                int idx = 0;
                uint32_t tschq_con_index;

                l_id = child->hw_lvl;
                tschq_con_index = dev->txschq_contig_index[l_id];
                hw_id = dev->txschq_contig_list[l_id][tschq_con_index];
                child->hw_id = hw_id;
                dev->txschq_contig_index[l_id]++;
                /* Update TL1 hw_id for its parent for config purpose */
                idx = dev->txschq_index[NIX_TXSCH_LVL_TL1]++;
                hw_id = dev->txschq_list[NIX_TXSCH_LVL_TL1][idx];
                child->parent_hw_id = hw_id;
                return 0;
        }
        if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL1 &&
            child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
                uint32_t tschq_con_index;

                l_id = child->hw_lvl;
                tschq_con_index = dev->txschq_index[l_id];
                hw_id = dev->txschq_list[l_id][tschq_con_index];
                child->hw_id = hw_id;
                dev->txschq_index[l_id]++;
                return 0;
        }

        /* Process children with parents */
        l_id = child->hw_lvl;
        schq_index = dev->txschq_index[l_id];
        schq_con_index = dev->txschq_contig_index[l_id];

        if (child->priority == parent->rr_prio) {
                hw_id = dev->txschq_list[l_id][schq_index];
                child->hw_id = hw_id;
                child->parent_hw_id = parent->hw_id;
                dev->txschq_index[l_id]++;
        } else {
                prio_offset = schq_con_index + child->priority;
                hw_id = dev->txschq_contig_list[l_id][prio_offset];
                child->hw_id = hw_id;
        }
        return 0;
}

static int
nix_tm_assign_hw_id(struct otx2_eth_dev *dev)
{
        struct otx2_nix_tm_node *parent, *child;
        uint32_t child_hw_lvl, con_index_inc, i;

        for (i = NIX_TXSCH_LVL_TL1; i > 0; i--) {
                TAILQ_FOREACH(parent, &dev->node_list, node) {
                        child_hw_lvl = parent->hw_lvl - 1;
                        if (parent->hw_lvl != i)
                                continue;
                        TAILQ_FOREACH(child, &dev->node_list, node) {
                                if (!child->parent)
                                        continue;
                                if (child->parent->id != parent->id)
                                        continue;
                                nix_tm_assign_id_to_node(dev, child, parent);
                        }

                        con_index_inc = parent->max_prio + 1;
                        dev->txschq_contig_index[child_hw_lvl] += con_index_inc;

                        /*
                         * Explicitly assign id to parent node if it
                         * doesn't have a parent
                         */
                        if (parent->hw_lvl == dev->otx2_tm_root_lvl)
                                nix_tm_assign_id_to_node(dev, parent, NULL);
                }
        }
        return 0;
}

static uint8_t
nix_tm_count_req_schq(struct otx2_eth_dev *dev,
                      struct nix_txsch_alloc_req *req, uint8_t lvl)
{
        struct otx2_nix_tm_node *tm_node;
        uint8_t contig_count;

        TAILQ_FOREACH(tm_node, &dev->node_list, node) {
                if (lvl == tm_node->hw_lvl) {
                        req->schq[lvl - 1] += tm_node->rr_num;
                        if (tm_node->max_prio != UINT32_MAX) {
                                contig_count = tm_node->max_prio + 1;
                                req->schq_contig[lvl - 1] += contig_count;
                        }
                }
                if (lvl == dev->otx2_tm_root_lvl &&
                    dev->otx2_tm_root_lvl && lvl == NIX_TXSCH_LVL_TL2 &&
                    tm_node->hw_lvl == dev->otx2_tm_root_lvl) {
                        req->schq_contig[dev->otx2_tm_root_lvl]++;
                }
        }

        req->schq[NIX_TXSCH_LVL_TL1] = 1;
        req->schq_contig[NIX_TXSCH_LVL_TL1] = 0;

        return 0;
}

static int
nix_tm_prepare_txschq_req(struct otx2_eth_dev *dev,
                          struct nix_txsch_alloc_req *req)
{
        uint8_t i;

        for (i = NIX_TXSCH_LVL_TL1; i > 0; i--)
                nix_tm_count_req_schq(dev, req, i);

        for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
                dev->txschq_index[i] = 0;
                dev->txschq_contig_index[i] = 0;
        }
        return 0;
}

static int
nix_tm_send_txsch_alloc_msg(struct otx2_eth_dev *dev)
{
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_txsch_alloc_req *req;
        struct nix_txsch_alloc_rsp *rsp;
        int rc;

        req = otx2_mbox_alloc_msg_nix_txsch_alloc(mbox);

        rc = nix_tm_prepare_txschq_req(dev, req);
        if (rc)
                return rc;

        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
        if (rc)
                return rc;

        nix_tm_copy_rsp_to_dev(dev, rsp);
        dev->link_cfg_lvl = rsp->link_cfg_lvl;

        nix_tm_assign_hw_id(dev);
        return 0;
}

static int
nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct otx2_nix_tm_node *tm_node;
        uint16_t sq, smq, rr_quantum;
        struct otx2_eth_txq *txq;
        int rc;

        nix_tm_update_parent_info(dev);

        rc = nix_tm_send_txsch_alloc_msg(dev);
        if (rc) {
                otx2_err("TM failed to alloc resources, rc=%d", rc);
                return rc;
        }

        rc = nix_tm_txsch_reg_config(dev);
        if (rc) {
                otx2_err("TM failed to configure sched registers, rc=%d", rc);
                return rc;
        }

        /* Enable xmit as all the topology is ready */
        TAILQ_FOREACH(tm_node, &dev->node_list, node) {
                if (tm_node->flags & NIX_TM_NODE_ENABLED)
                        continue;

                /* Enable xmit on sq */
                if (tm_node->lvl != OTX2_TM_LVL_QUEUE) {
                        tm_node->flags |= NIX_TM_NODE_ENABLED;
                        continue;
                }

                /* Don't enable SMQ or mark the node enabled */
                if (!xmit_enable)
                        continue;

                sq = tm_node->id;
                /* Valid leaf ids are 0..nb_tx_queues - 1 */
                if (sq >= eth_dev->data->nb_tx_queues) {
                        rc = -EFAULT;
                        break;
                }

                txq = eth_dev->data->tx_queues[sq];

                smq = tm_node->parent->hw_id;
                rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);

                rc = nix_tm_sw_xon(txq, smq, rr_quantum);
                if (rc)
                        break;
                tm_node->flags |= NIX_TM_NODE_ENABLED;
        }

        if (rc)
                otx2_err("TM failed to enable xmit on sq %u, rc=%d", sq, rc);

        return rc;
}

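/* Shape of the default tree built below (a sketch; the exact chain
 * depends on TL1 access): TL1 -> TL2 -> TL3 -> TL4 -> SMQ with internal
 * node ids def .. def + 4, where def = nb_tx_queues, plus one leaf per
 * Tx queue with ids 0 .. def - 1. Without TL1 access the root starts at
 * TL2 and the chain is one level shorter.
 */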
static int
nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        uint32_t def = eth_dev->data->nb_tx_queues;
        struct rte_tm_node_params params;
        uint32_t leaf_parent, i;
        int rc = 0;

        /* Default params */
        memset(&params, 0, sizeof(params));
        params.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;

        if (nix_tm_have_tl1_access(dev)) {
                dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
                rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
                                             DEFAULT_RR_WEIGHT,
                                             NIX_TXSCH_LVL_TL1,
                                             OTX2_TM_LVL_ROOT, false, &params);
                if (rc)
                        goto exit;
                rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
                                             DEFAULT_RR_WEIGHT,
                                             NIX_TXSCH_LVL_TL2,
                                             OTX2_TM_LVL_SCH1, false, &params);
                if (rc)
                        goto exit;

                rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
                                             DEFAULT_RR_WEIGHT,
                                             NIX_TXSCH_LVL_TL3,
                                             OTX2_TM_LVL_SCH2, false, &params);
                if (rc)
                        goto exit;

                rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
                                             DEFAULT_RR_WEIGHT,
                                             NIX_TXSCH_LVL_TL4,
                                             OTX2_TM_LVL_SCH3, false, &params);
                if (rc)
                        goto exit;

                rc = nix_tm_node_add_to_list(dev, def + 4, def + 3, 0,
                                             DEFAULT_RR_WEIGHT,
                                             NIX_TXSCH_LVL_SMQ,
                                             OTX2_TM_LVL_SCH4, false, &params);
                if (rc)
                        goto exit;

                leaf_parent = def + 4;
        } else {
                dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
                rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
                                             DEFAULT_RR_WEIGHT,
                                             NIX_TXSCH_LVL_TL2,
                                             OTX2_TM_LVL_ROOT, false, &params);
                if (rc)
                        goto exit;

                rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
                                             DEFAULT_RR_WEIGHT,
                                             NIX_TXSCH_LVL_TL3,
                                             OTX2_TM_LVL_SCH1, false, &params);
                if (rc)
                        goto exit;

                rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
                                             DEFAULT_RR_WEIGHT,
                                             NIX_TXSCH_LVL_TL4,
                                             OTX2_TM_LVL_SCH2, false, &params);
                if (rc)
                        goto exit;

                rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
                                             DEFAULT_RR_WEIGHT,
                                             NIX_TXSCH_LVL_SMQ,
                                             OTX2_TM_LVL_SCH3, false, &params);
                if (rc)
                        goto exit;

                leaf_parent = def + 3;
        }

        /* Add leaf nodes */
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                rc = nix_tm_node_add_to_list(dev, i, leaf_parent, 0,
                                             DEFAULT_RR_WEIGHT,
                                             NIX_TXSCH_LVL_CNT,
                                             OTX2_TM_LVL_QUEUE, false, &params);
                if (rc)
                        break;
        }

exit:
        return rc;
}

void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

        TAILQ_INIT(&dev->node_list);
        TAILQ_INIT(&dev->shaper_profile_list);
}

int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
        int rc;

        /* Free up all resources already held */
        rc = nix_tm_free_resources(dev, 0, 0, false);
        if (rc) {
                otx2_err("Failed to free up existing resources, rc=%d", rc);
                return rc;
        }

        /* Clear shaper profiles */
        nix_tm_clear_shaper_profiles(dev);
        dev->tm_flags = NIX_TM_DEFAULT_TREE;

        /* Disable TL1 Static Priority when VFs are enabled, as otherwise
         * the VFs' TL2 nodes would need to be reallocated at runtime to
         * support a specific PF topology.
         */
        if (pci_dev->max_vfs)
                dev->tm_flags |= NIX_TM_TL1_NO_SP;

        rc = nix_tm_prepare_default_tree(eth_dev);
        if (rc != 0)
                return rc;

        rc = nix_tm_alloc_resources(eth_dev, false);
        if (rc != 0)
                return rc;
        dev->tm_leaf_cnt = sq_cnt;

        return 0;
}

int
otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        int rc;

        /* Xmit is assumed to be disabled */
        /* Free up resources already held */
        rc = nix_tm_free_resources(dev, 0, 0, false);
        if (rc) {
                otx2_err("Failed to free up existing resources, rc=%d", rc);
                return rc;
        }

        /* Clear shaper profiles */
        nix_tm_clear_shaper_profiles(dev);

        dev->tm_flags = 0;
        return 0;
}

int
otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
                          uint32_t *rr_quantum, uint16_t *smq)
{
        struct otx2_nix_tm_node *tm_node;
        int rc;

        /* 0..sq_cnt-1 are leaf nodes */
        if (sq >= dev->tm_leaf_cnt)
                return -EINVAL;

        /* Search for internal node first */
        tm_node = nix_tm_node_search(dev, sq, false);
        if (!tm_node)
                tm_node = nix_tm_node_search(dev, sq, true);

        /* Check if we found a valid leaf node */
        if (!tm_node || tm_node->lvl != OTX2_TM_LVL_QUEUE ||
            !tm_node->parent || tm_node->parent->hw_id == UINT32_MAX) {
                return -EIO;
        }

        /* Get SMQ Id of leaf node's parent */
        *smq = tm_node->parent->hw_id;
        *rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);

        rc = nix_smq_xoff(dev, *smq, false);
        if (rc)
                return rc;
        tm_node->flags |= NIX_TM_NODE_ENABLED;

        return 0;
}
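
/* Usage sketch (hypothetical caller, for illustration only): a Tx queue
 * start path could do
 *
 *      rc = otx2_nix_tm_get_leaf_data(dev, txq->sq, &rr_quantum, &smq);
 *
 * and, on success, program the SQ context with the returned smq and
 * rr_quantum, i.e. these two values seed the SQ's scheduling fields.
 */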