/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_malloc.h>

#include "otx2_ethdev.h"
#include "otx2_tm.h"

/* Use last LVL_CNT nodes as default nodes */
#define NIX_DEFAULT_NODE_ID_START (RTE_TM_NODE_ID_NULL - NIX_TXSCH_LVL_CNT)

enum otx2_tm_node_level {
	OTX2_TM_LVL_ROOT = 0,
	OTX2_TM_LVL_SCH1,
	OTX2_TM_LVL_SCH2,
	OTX2_TM_LVL_SCH3,
	OTX2_TM_LVL_SCH4,
	OTX2_TM_LVL_QUEUE,
	OTX2_TM_LVL_MAX,
};

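/*
 * Pack shaper parameters into the layout used by the NIX_AF_*_CIR/PIR
 * registers: burst exponent at bit 37, burst mantissa at bit 29, rate
 * divider exponent at bit 13, rate exponent at bit 9 and rate mantissa
 * at bit 1. Callers OR in bit 0 to enable the shaper.
 */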
static inline
uint64_t shaper2regval(struct shaper_params *shaper)
{
	return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
		(shaper->div_exp << 13) | (shaper->exponent << 9) |
		(shaper->mantissa << 1);
}

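/*
 * Derive the NIX link from the Tx channel base: CGX LMAC channels
 * (0x800 and above) map to the per-LMAC CGX links, channels below
 * 0x700 belong to LBK (link 12) and the remaining range is SDP
 * (link 13).
 */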
static int
nix_get_link(struct otx2_eth_dev *dev)
{
	int link = 13 /* SDP */;
	uint16_t lmac_chan;
	uint16_t map;

	lmac_chan = dev->tx_chan_base;

	/* CGX lmac link */
	if (lmac_chan >= 0x800) {
		map = lmac_chan & 0x7FF;
		link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
	} else if (lmac_chan < 0x700) {
		/* LBK channel */
		link = 12;
	}

	return link;
}

static uint8_t
nix_get_relchan(struct otx2_eth_dev *dev)
{
	return dev->tx_chan_base & 0xff;
}

static bool
nix_tm_have_tl1_access(struct otx2_eth_dev *dev)
{
	bool is_lbk = otx2_dev_is_lbk(dev);
	return otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) &&
		!is_lbk && !dev->maxvf;
}

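/*
 * Strict-priority children of a node take contiguous hw ids indexed
 * by priority, so any such child's hw id minus its priority yields the
 * anchor of that range. Children at the parent's RR priority are
 * skipped since they are allocated from the non-contiguous list.
 */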
static int
find_prio_anchor(struct otx2_eth_dev *dev, uint32_t node_id)
{
	struct otx2_nix_tm_node *child_node;

	TAILQ_FOREACH(child_node, &dev->node_list, node) {
		if (!child_node->parent)
			continue;
		if (!(child_node->parent->id == node_id))
			continue;
		if (child_node->priority == child_node->parent->rr_prio)
			continue;
		return child_node->hw_id - child_node->priority;
	}
	return 0;
}

static struct otx2_nix_tm_shaper_profile *
nix_tm_shaper_profile_search(struct otx2_eth_dev *dev, uint32_t shaper_id)
{
	struct otx2_nix_tm_shaper_profile *tm_shaper_profile;

	TAILQ_FOREACH(tm_shaper_profile, &dev->shaper_profile_list, shaper) {
		if (tm_shaper_profile->shaper_profile_id == shaper_id)
			return tm_shaper_profile;
	}
	return NULL;
}

static inline uint64_t
shaper_rate_to_nix(uint64_t cclk_hz, uint64_t cclk_ticks,
		   uint64_t value, uint64_t *exponent_p,
		   uint64_t *mantissa_p, uint64_t *div_exp_p)
{
	uint64_t div_exp, exponent, mantissa;

	/* Boundary checks */
	if (value < MIN_SHAPER_RATE(cclk_hz, cclk_ticks) ||
	    value > MAX_SHAPER_RATE(cclk_hz, cclk_ticks))
		return 0;

	if (value <= SHAPER_RATE(cclk_hz, cclk_ticks, 0, 0, 0)) {
		/* Calculate rate div_exp and mantissa using
		 * the following formula:
		 *
		 * value = (cclk_hz * (256 + mantissa)) /
		 *		((cclk_ticks << div_exp) * 256)
		 */
		div_exp = 0;
		exponent = 0;
		mantissa = MAX_RATE_MANTISSA;

		while (value < (cclk_hz / (cclk_ticks << div_exp)))
			div_exp += 1;

		while (value <
		       ((cclk_hz * (256 + mantissa)) /
			((cclk_ticks << div_exp) * 256)))
			mantissa -= 1;
	} else {
		/* Calculate rate exponent and mantissa using
		 * the following formula:
		 *
		 * value = (cclk_hz * ((256 + mantissa) << exponent)) /
		 *		(cclk_ticks * 256)
		 */
		div_exp = 0;
		exponent = MAX_RATE_EXPONENT;
		mantissa = MAX_RATE_MANTISSA;

		while (value < (cclk_hz * (1 << exponent)) / cclk_ticks)
			exponent -= 1;

		while (value < (cclk_hz * ((256 + mantissa) << exponent)) /
		       (cclk_ticks * 256))
			mantissa -= 1;
	}

	if (div_exp > MAX_RATE_DIV_EXP ||
	    exponent > MAX_RATE_EXPONENT || mantissa > MAX_RATE_MANTISSA)
		return 0;

	if (div_exp_p)
		*div_exp_p = div_exp;
	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	/* Calculate real rate value */
	return SHAPER_RATE(cclk_hz, cclk_ticks, exponent, mantissa, div_exp);
}

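/* TL1 runs on its own time wheel; all lower levels share the LX ticks */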
static inline uint64_t
lx_shaper_rate_to_nix(uint64_t cclk_hz, uint32_t hw_lvl,
		      uint64_t value, uint64_t *exponent,
		      uint64_t *mantissa, uint64_t *div_exp)
{
	if (hw_lvl == NIX_TXSCH_LVL_TL1)
		return shaper_rate_to_nix(cclk_hz, L1_TIME_WHEEL_CCLK_TICKS,
					  value, exponent, mantissa, div_exp);
	else
		return shaper_rate_to_nix(cclk_hz, LX_TIME_WHEEL_CCLK_TICKS,
					  value, exponent, mantissa, div_exp);
}

static inline uint64_t
shaper_burst_to_nix(uint64_t value, uint64_t *exponent_p,
		    uint64_t *mantissa_p)
{
	uint64_t exponent, mantissa;

	if (value < MIN_SHAPER_BURST || value > MAX_SHAPER_BURST)
		return 0;

	/* Calculate burst exponent and mantissa using
	 * the following formula:
	 *
	 * value = ((256 + mantissa) << (exponent + 1)) / 256
	 */
	exponent = MAX_BURST_EXPONENT;
	mantissa = MAX_BURST_MANTISSA;

	while (value < (1ull << (exponent + 1)))
		exponent -= 1;

	while (value < ((256 + mantissa) << (exponent + 1)) / 256)
		mantissa -= 1;

	if (exponent > MAX_BURST_EXPONENT || mantissa > MAX_BURST_MANTISSA)
		return 0;

	if (exponent_p)
		*exponent_p = exponent;
	if (mantissa_p)
		*mantissa_p = mantissa;

	return SHAPER_BURST(exponent, mantissa);
}

static int
configure_shaper_cir_pir_reg(struct otx2_eth_dev *dev,
			     struct otx2_nix_tm_node *tm_node,
			     struct shaper_params *cir,
			     struct shaper_params *pir)
{
	uint32_t shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
	struct otx2_nix_tm_shaper_profile *shaper_profile = NULL;
	struct rte_tm_shaper_params *param;

	shaper_profile_id = tm_node->params.shaper_profile_id;

	shaper_profile = nix_tm_shaper_profile_search(dev, shaper_profile_id);
	if (shaper_profile) {
		param = &shaper_profile->profile;
		/* Calculate CIR exponent and mantissa */
		if (param->committed.rate)
			cir->rate = lx_shaper_rate_to_nix(CCLK_HZ,
							  tm_node->hw_lvl_id,
							  param->committed.rate,
							  &cir->exponent,
							  &cir->mantissa,
							  &cir->div_exp);

		/* Calculate PIR exponent and mantissa */
		if (param->peak.rate)
			pir->rate = lx_shaper_rate_to_nix(CCLK_HZ,
							  tm_node->hw_lvl_id,
							  param->peak.rate,
							  &pir->exponent,
							  &pir->mantissa,
							  &pir->div_exp);

		/* Calculate CIR burst exponent and mantissa */
		if (param->committed.size)
			cir->burst = shaper_burst_to_nix(param->committed.size,
							 &cir->burst_exponent,
							 &cir->burst_mantissa);

		/* Calculate PIR burst exponent and mantissa */
		if (param->peak.size)
			pir->burst = shaper_burst_to_nix(param->peak.size,
							 &pir->burst_exponent,
							 &pir->burst_mantissa);
	}

	return 0;
}

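/* Send the pending txschq config request and reset its register count */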
static int
send_tm_reqval(struct otx2_mbox *mbox, struct nix_txschq_config *req)
{
	int rc;

	if (req->num_regs > MAX_REGS_PER_MBOX_MSG)
		return -ERANGE;

	rc = otx2_mbox_process(mbox);
	if (rc)
		return rc;

	req->num_regs = 0;
	return 0;
}

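/*
 * Program parent, topology, schedule and shaper registers for one
 * non-leaf node through the AF mailbox; the register set depends on
 * the node's hardware level (SMQ/MDQ up to TL1).
 */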
static int
populate_tm_registers(struct otx2_eth_dev *dev,
		      struct otx2_nix_tm_node *tm_node)
{
	uint64_t strict_sched_prio, rr_prio;
	struct otx2_mbox *mbox = dev->mbox;
	volatile uint64_t *reg, *regval;
	uint64_t parent = 0, child = 0;
	struct shaper_params cir, pir;
	struct nix_txschq_config *req;
	uint64_t rr_quantum;
	uint32_t hw_lvl;
	uint32_t schq;
	int rc;

	memset(&cir, 0, sizeof(cir));
	memset(&pir, 0, sizeof(pir));

	/* Skip leaf nodes */
	if (tm_node->hw_lvl_id == NIX_TXSCH_LVL_CNT)
		return 0;

	/* Root node will not have a parent node */
	if (tm_node->hw_lvl_id == dev->otx2_tm_root_lvl)
		parent = tm_node->parent_hw_id;
	else
		parent = tm_node->parent->hw_id;

	/* Program default TL1 config when the root is at TL2 */
	if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
	    tm_node->hw_lvl_id == dev->otx2_tm_root_lvl) {
		schq = parent;
		/*
		 * Default config for TL1.
		 * For VF this is always ignored.
		 */

		req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = NIX_TXSCH_LVL_TL1;

		/* Set DWRR quantum */
		req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
		req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
		req->num_regs++;

		req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
		req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
		req->num_regs++;

		req->reg[2] = NIX_AF_TL1X_CIR(schq);
		req->regval[2] = 0;
		req->num_regs++;

		rc = send_tm_reqval(mbox, req);
		if (rc)
			goto error;
	}

	if (tm_node->hw_lvl_id != NIX_TXSCH_LVL_SMQ)
		child = find_prio_anchor(dev, tm_node->id);

	rr_prio = tm_node->rr_prio;
	hw_lvl = tm_node->hw_lvl_id;
	strict_sched_prio = tm_node->priority;
	schq = tm_node->hw_id;
	rr_quantum = (tm_node->weight * NIX_TM_RR_QUANTUM_MAX) /
		MAX_SCHED_WEIGHT;

	configure_shaper_cir_pir_reg(dev, tm_node, &cir, &pir);

	otx2_tm_dbg("Configure node %p, lvl %u hw_lvl %u, id %u, hw_id %u, "
		     "parent_hw_id %" PRIx64 ", pir %" PRIx64 ", cir %" PRIx64,
		     tm_node, tm_node->level_id, hw_lvl,
		     tm_node->id, schq, parent, pir.rate, cir.rate);

	rc = -EFAULT;

	switch (hw_lvl) {
	case NIX_TXSCH_LVL_SMQ:
		req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = hw_lvl;
		reg = req->reg;
		regval = req->regval;
		req->num_regs = 0;

		/* Set xoff which will be cleared later */
		*reg++ = NIX_AF_SMQX_CFG(schq);
		*regval++ = BIT_ULL(50) | ((uint64_t)NIX_MAX_VTAG_INS << 36) |
				(NIX_MAX_HW_FRS << 8) | NIX_MIN_HW_FRS;
		req->num_regs++;
		*reg++ = NIX_AF_MDQX_PARENT(schq);
		*regval++ = parent << 16;
		req->num_regs++;
		*reg++ = NIX_AF_MDQX_SCHEDULE(schq);
		*regval++ = (strict_sched_prio << 24) | rr_quantum;
		req->num_regs++;
		if (pir.rate && pir.burst) {
			*reg++ = NIX_AF_MDQX_PIR(schq);
			*regval++ = shaper2regval(&pir) | 1;
			req->num_regs++;
		}

		if (cir.rate && cir.burst) {
			*reg++ = NIX_AF_MDQX_CIR(schq);
			*regval++ = shaper2regval(&cir) | 1;
			req->num_regs++;
		}

		rc = send_tm_reqval(mbox, req);
		if (rc)
			goto error;
		break;
	case NIX_TXSCH_LVL_TL4:
		req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = hw_lvl;
		req->num_regs = 0;
		reg = req->reg;
		regval = req->regval;

		*reg++ = NIX_AF_TL4X_PARENT(schq);
		*regval++ = parent << 16;
		req->num_regs++;
		*reg++ = NIX_AF_TL4X_TOPOLOGY(schq);
		*regval++ = (child << 32) | (rr_prio << 1);
		req->num_regs++;
		*reg++ = NIX_AF_TL4X_SCHEDULE(schq);
		*regval++ = (strict_sched_prio << 24) | rr_quantum;
		req->num_regs++;
		if (pir.rate && pir.burst) {
			*reg++ = NIX_AF_TL4X_PIR(schq);
			*regval++ = shaper2regval(&pir) | 1;
			req->num_regs++;
		}
		if (cir.rate && cir.burst) {
			*reg++ = NIX_AF_TL4X_CIR(schq);
			*regval++ = shaper2regval(&cir) | 1;
			req->num_regs++;
		}
		/* Configure TL4 to send to SDP channel instead of CGX/LBK */
		if (otx2_dev_is_sdp(dev)) {
			*reg++ = NIX_AF_TL4X_SDP_LINK_CFG(schq);
			*regval++ = BIT_ULL(12);
			req->num_regs++;
		}

		rc = send_tm_reqval(mbox, req);
		if (rc)
			goto error;
		break;
	case NIX_TXSCH_LVL_TL3:
		req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = hw_lvl;
		req->num_regs = 0;
		reg = req->reg;
		regval = req->regval;

		*reg++ = NIX_AF_TL3X_PARENT(schq);
		*regval++ = parent << 16;
		req->num_regs++;
		*reg++ = NIX_AF_TL3X_TOPOLOGY(schq);
		*regval++ = (child << 32) | (rr_prio << 1);
		req->num_regs++;
		*reg++ = NIX_AF_TL3X_SCHEDULE(schq);
		*regval++ = (strict_sched_prio << 24) | rr_quantum;
		req->num_regs++;

		/* Link configuration */
		if (!otx2_dev_is_sdp(dev) &&
		    dev->link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
			*reg++ = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
						nix_get_link(dev));
			*regval++ = BIT_ULL(12) | nix_get_relchan(dev);
			req->num_regs++;
		}

		if (pir.rate && pir.burst) {
			*reg++ = NIX_AF_TL3X_PIR(schq);
			*regval++ = shaper2regval(&pir) | 1;
			req->num_regs++;
		}
		if (cir.rate && cir.burst) {
			*reg++ = NIX_AF_TL3X_CIR(schq);
			*regval++ = shaper2regval(&cir) | 1;
			req->num_regs++;
		}

		rc = send_tm_reqval(mbox, req);
		if (rc)
			goto error;
		break;
	case NIX_TXSCH_LVL_TL2:
		req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = hw_lvl;
		req->num_regs = 0;
		reg = req->reg;
		regval = req->regval;

		*reg++ = NIX_AF_TL2X_PARENT(schq);
		*regval++ = parent << 16;
		req->num_regs++;
		*reg++ = NIX_AF_TL2X_TOPOLOGY(schq);
		*regval++ = (child << 32) | (rr_prio << 1);
		req->num_regs++;
		*reg++ = NIX_AF_TL2X_SCHEDULE(schq);
		if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2)
			*regval++ = (1 << 24) | rr_quantum;
		else
			*regval++ = (strict_sched_prio << 24) | rr_quantum;
		req->num_regs++;

		/* Link configuration */
		if (!otx2_dev_is_sdp(dev) &&
		    dev->link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
			*reg++ = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
						nix_get_link(dev));
			*regval++ = BIT_ULL(12) | nix_get_relchan(dev);
			req->num_regs++;
		}
		if (pir.rate && pir.burst) {
			*reg++ = NIX_AF_TL2X_PIR(schq);
			*regval++ = shaper2regval(&pir) | 1;
			req->num_regs++;
		}
		if (cir.rate && cir.burst) {
			*reg++ = NIX_AF_TL2X_CIR(schq);
			*regval++ = shaper2regval(&cir) | 1;
			req->num_regs++;
		}

		rc = send_tm_reqval(mbox, req);
		if (rc)
			goto error;
		break;
	case NIX_TXSCH_LVL_TL1:
		req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
		req->lvl = hw_lvl;
		req->num_regs = 0;
		reg = req->reg;
		regval = req->regval;

		*reg++ = NIX_AF_TL1X_SCHEDULE(schq);
		*regval++ = rr_quantum;
		req->num_regs++;
		*reg++ = NIX_AF_TL1X_TOPOLOGY(schq);
		*regval++ = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
		req->num_regs++;
		if (cir.rate && cir.burst) {
			*reg++ = NIX_AF_TL1X_CIR(schq);
			*regval++ = shaper2regval(&cir) | 1;
			req->num_regs++;
		}

		rc = send_tm_reqval(mbox, req);
		if (rc)
			goto error;
		break;
	}

	return 0;
error:
	otx2_err("Txschq cfg request failed for node %p, rc=%d", tm_node, rc);
	return rc;
}

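/* Program nodes level by level, from SMQ up to the root level */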
static int
nix_tm_txsch_reg_config(struct otx2_eth_dev *dev)
{
	struct otx2_nix_tm_node *tm_node;
	uint32_t lvl;
	int rc = 0;

	for (lvl = 0; lvl < (uint32_t)dev->otx2_tm_root_lvl + 1; lvl++) {
		TAILQ_FOREACH(tm_node, &dev->node_list, node) {
			if (tm_node->hw_lvl_id == lvl) {
				rc = populate_tm_registers(dev, tm_node);
				if (rc)
					goto exit;
			}
		}
	}
exit:
	return rc;
}

static struct otx2_nix_tm_node *
nix_tm_node_search(struct otx2_eth_dev *dev,
		   uint32_t node_id, bool user)
{
	struct otx2_nix_tm_node *tm_node;

	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
		if (tm_node->id == node_id &&
		    (user == !!(tm_node->flags & NIX_TM_NODE_USER)))
			return tm_node;
	}
	return NULL;
}

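/* Count the children of parent_id that share a priority (RR group size) */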
static uint32_t
check_rr(struct otx2_eth_dev *dev, uint32_t priority, uint32_t parent_id)
{
	struct otx2_nix_tm_node *tm_node;
	uint32_t rr_num = 0;

	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
		if (!tm_node->parent)
			continue;

		if (!(tm_node->parent->id == parent_id))
			continue;

		if (tm_node->priority == priority)
			rr_num++;
	}
	return rr_num;
}

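/*
 * Derive each parent's RR priority (the priority shared by more than
 * one child) and the highest strict priority among its children; both
 * are needed later for hw id assignment and topology registers.
 */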
static int
nix_tm_update_parent_info(struct otx2_eth_dev *dev)
{
	struct otx2_nix_tm_node *tm_node_child;
	struct otx2_nix_tm_node *tm_node;
	struct otx2_nix_tm_node *parent;
	uint32_t rr_num = 0;
	uint32_t priority;

	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
		if (!tm_node->parent)
			continue;
		/* Count the group of children sharing a priority, i.e. in RR */
		parent = tm_node->parent;
		priority = tm_node->priority;
		rr_num = check_rr(dev, priority, parent->id);

		/* Assuming that multiple RR groups are
		 * not configured based on capability.
		 */
		if (rr_num > 1) {
			parent->rr_prio = priority;
			parent->rr_num = rr_num;
		}

		/* Find strict-priority children that are not part of RR */
		TAILQ_FOREACH(tm_node_child, &dev->node_list, node) {
			if (!tm_node_child->parent)
				continue;
			if (parent->id != tm_node_child->parent->id)
				continue;
			if (parent->max_prio == UINT32_MAX &&
			    tm_node_child->priority != parent->rr_prio)
				parent->max_prio = 0;

			if (parent->max_prio < tm_node_child->priority &&
			    parent->rr_prio != tm_node_child->priority)
				parent->max_prio = tm_node_child->priority;
		}
	}

	return 0;
}

static int
nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,
			uint32_t parent_node_id, uint32_t priority,
			uint32_t weight, uint16_t hw_lvl_id,
			uint16_t level_id, bool user,
			struct rte_tm_node_params *params)
{
	struct otx2_nix_tm_shaper_profile *shaper_profile;
	struct otx2_nix_tm_node *tm_node, *parent_node;
	uint32_t shaper_profile_id;

	shaper_profile_id = params->shaper_profile_id;
	shaper_profile = nix_tm_shaper_profile_search(dev, shaper_profile_id);

	parent_node = nix_tm_node_search(dev, parent_node_id, user);

	tm_node = rte_zmalloc("otx2_nix_tm_node",
			      sizeof(struct otx2_nix_tm_node), 0);
	if (!tm_node)
		return -ENOMEM;

	tm_node->level_id = level_id;
	tm_node->hw_lvl_id = hw_lvl_id;

	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->rr_prio = 0xf;
	tm_node->max_prio = UINT32_MAX;
	tm_node->hw_id = UINT32_MAX;
	tm_node->flags = 0;
	if (user)
		tm_node->flags = NIX_TM_NODE_USER;
	rte_memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));

	if (shaper_profile)
		shaper_profile->reference_count++;
	tm_node->parent = parent_node;
	tm_node->parent_hw_id = UINT32_MAX;

	TAILQ_INSERT_TAIL(&dev->node_list, tm_node, node);

	return 0;
}

static int
nix_tm_clear_shaper_profiles(struct otx2_eth_dev *dev)
{
	struct otx2_nix_tm_shaper_profile *shaper_profile;

	while ((shaper_profile = TAILQ_FIRST(&dev->shaper_profile_list))) {
		if (shaper_profile->reference_count)
			otx2_tm_dbg("Shaper profile %u has non zero references",
				    shaper_profile->shaper_profile_id);
		TAILQ_REMOVE(&dev->shaper_profile_list, shaper_profile, shaper);
		rte_free(shaper_profile);
	}

	return 0;
}

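/*
 * Assert (bits 49/50) or deassert SMQ xoff while rewriting the
 * unmodified VTAG and frame size fields of NIX_AF_SMQX_CFG.
 */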
static int
nix_smq_xoff(struct otx2_eth_dev *dev, uint16_t smq, bool enable)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_txschq_config *req;

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
	req->lvl = NIX_TXSCH_LVL_SMQ;
	req->num_regs = 1;

	req->reg[0] = NIX_AF_SMQX_CFG(smq);
	/* Unmodified fields */
	req->regval[0] = ((uint64_t)NIX_MAX_VTAG_INS << 36) |
				(NIX_MAX_HW_FRS << 8) | NIX_MIN_HW_FRS;

	if (enable)
		req->regval[0] |= BIT_ULL(50) | BIT_ULL(49);

	return otx2_mbox_process(mbox);
}

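/*
 * Enable or disable flow control on the SQB aura backing a SQ and
 * resync the cached count in fc_mem so the datapath sees a consistent
 * value either way.
 */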
int
otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable)
{
	struct otx2_eth_txq *txq = __txq;
	struct npa_aq_enq_req *req;
	struct npa_aq_enq_rsp *rsp;
	struct otx2_npa_lf *lf;
	struct otx2_mbox *mbox;
	uint64_t aura_handle;
	int rc;

	lf = otx2_npa_lf_obj_get();
	if (!lf)
		return -EFAULT;
	mbox = lf->mbox;
	/* Set/clear sqb aura fc_ena */
	aura_handle = txq->sqb_pool->pool_id;
	req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);

	req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_WRITE;
	/* Below is not needed for aura writes but AF driver needs it */
	/* AF will translate to associated poolctx */
	req->aura.pool_addr = req->aura_id;

	req->aura.fc_ena = enable;
	req->aura_mask.fc_ena = 1;

	rc = otx2_mbox_process(mbox);
	if (rc)
		return rc;

	/* Read back npa aura ctx */
	req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);

	req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
	req->ctype = NPA_AQ_CTYPE_AURA;
	req->op = NPA_AQ_INSTOP_READ;

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Init fc_mem when enabling, as there might be no FC updates yet */
	if (enable)
		*(volatile uint64_t *)txq->fc_mem = rsp->aura.count;
	else
		*(volatile uint64_t *)txq->fc_mem = txq->nb_sqb_bufs;
	/* Sync write barrier */
	rte_wmb();

	return 0;
}

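/*
 * Poll SQ stats until the queue is quiescent: at most one SQB in use,
 * head and tail offsets equal, and every SQB buffer returned to the
 * flow control count.
 */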
static void
nix_txq_flush_sq_spin(struct otx2_eth_txq *txq)
{
	uint16_t sqb_cnt, head_off, tail_off;
	struct otx2_eth_dev *dev = txq->dev;
	uint16_t sq = txq->sq;
	uint64_t reg, val;
	int64_t *regaddr;

	while (true) {
		reg = ((uint64_t)sq << 32);
		regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);
		val = otx2_atomic64_add_nosync(reg, regaddr);

		regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS);
		val = otx2_atomic64_add_nosync(reg, regaddr);
		sqb_cnt = val & 0xFFFF;
		head_off = (val >> 20) & 0x3F;
		tail_off = (val >> 28) & 0x3F;

		/* SQ reached quiescent state */
		if (sqb_cnt <= 1 && head_off == tail_off &&
		    (*txq->fc_mem == txq->nb_sqb_bufs)) {
			break;
		}

		rte_pause();
	}
}

int
otx2_nix_tm_sw_xoff(void *__txq, bool dev_started)
{
	struct otx2_eth_txq *txq = __txq;
	struct otx2_eth_dev *dev = txq->dev;
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *req;
	struct nix_aq_enq_rsp *rsp;
	uint16_t smq;
	int rc;

	/* Get smq from sq */
	req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	req->qidx = txq->sq;
	req->ctype = NIX_AQ_CTYPE_SQ;
	req->op = NIX_AQ_INSTOP_READ;
	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		otx2_err("Failed to get smq, rc=%d", rc);
		return -EIO;
	}

	/* Check if sq is enabled */
	if (!rsp->sq.ena)
		return 0;

	smq = rsp->sq.smq;

	/* Enable CGX RXTX to drain pkts */
	if (!dev_started) {
		rc = otx2_cgx_rxtx_start(dev);
		if (rc)
			return rc;
	}

	rc = otx2_nix_sq_sqb_aura_fc(txq, false);
	if (rc < 0) {
		otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
		goto cleanup;
	}

	/* Disable smq xoff in case it was enabled earlier */
	rc = nix_smq_xoff(dev, smq, false);
	if (rc) {
		otx2_err("Failed to enable smq for sq %u, rc=%d", txq->sq, rc);
		goto cleanup;
	}

	/* Wait for sq entries to be flushed */
	nix_txq_flush_sq_spin(txq);

	/* Flush and enable smq xoff */
	rc = nix_smq_xoff(dev, smq, true);
	if (rc) {
		otx2_err("Failed to disable smq for sq %u, rc=%d", txq->sq, rc);
		return rc;
	}

cleanup:
	/* Restore cgx state */
	if (!dev_started)
		rc |= otx2_cgx_rxtx_stop(dev);

	return rc;
}

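/* Restart a SQ: bind it to its SMQ, enable SQB aura FC, clear SMQ xoff */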
static int
nix_tm_sw_xon(struct otx2_eth_txq *txq,
	      uint16_t smq, uint32_t rr_quantum)
{
	struct otx2_eth_dev *dev = txq->dev;
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *req;
	int rc;

	otx2_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum %u",
		    txq->sq, smq, rr_quantum);
	/* Set smq from sq */
	req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	req->qidx = txq->sq;
	req->ctype = NIX_AQ_CTYPE_SQ;
	req->op = NIX_AQ_INSTOP_WRITE;
	req->sq.smq = smq;
	req->sq.smq_rr_quantum = rr_quantum;
	req->sq_mask.smq = ~req->sq_mask.smq;
	req->sq_mask.smq_rr_quantum = ~req->sq_mask.smq_rr_quantum;

	rc = otx2_mbox_process(mbox);
	if (rc) {
		otx2_err("Failed to set smq, rc=%d", rc);
		return -EIO;
	}

	/* Enable sqb_aura fc */
	rc = otx2_nix_sq_sqb_aura_fc(txq, true);
	if (rc < 0) {
		otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
		return rc;
	}

	/* Disable smq xoff */
	rc = nix_smq_xoff(dev, smq, false);
	if (rc) {
		otx2_err("Failed to enable smq for sq %u", txq->sq);
		return rc;
	}

	return 0;
}

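/*
 * Free TM nodes matching flags_mask/flags: release their hardware
 * schedulers via the AF mailbox and, unless hw_only is set, drop the
 * software node state too. A zero flags_mask frees every scheduler
 * owned by this LF in a single TXSCHQ_FREE_ALL request.
 */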
static int
nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask,
		      uint32_t flags, bool hw_only)
{
	struct otx2_nix_tm_shaper_profile *shaper_profile;
	struct otx2_nix_tm_node *tm_node, *next_node;
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_txsch_free_req *req;
	uint32_t shaper_profile_id;
	bool skip_node = false;
	int rc = 0;

	next_node = TAILQ_FIRST(&dev->node_list);
	while (next_node) {
		tm_node = next_node;
		next_node = TAILQ_NEXT(tm_node, node);

		/* Check for only requested nodes */
		if ((tm_node->flags & flags_mask) != flags)
			continue;

		if (nix_tm_have_tl1_access(dev) &&
		    tm_node->hw_lvl_id == NIX_TXSCH_LVL_TL1)
			skip_node = true;

		otx2_tm_dbg("Free hwres for node %u, hwlvl %u, hw_id %u (%p)",
			    tm_node->id, tm_node->hw_lvl_id,
			    tm_node->hw_id, tm_node);
		/* Free specific HW resource if requested */
		if (!skip_node && flags_mask &&
		    tm_node->flags & NIX_TM_NODE_HWRES) {
			req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
			req->flags = 0;
			req->schq_lvl = tm_node->hw_lvl_id;
			req->schq = tm_node->hw_id;
			rc = otx2_mbox_process(mbox);
			if (rc)
				break;
		} else {
			skip_node = false;
		}
		tm_node->flags &= ~NIX_TM_NODE_HWRES;

		/* Leave software elements if needed */
		if (hw_only)
			continue;

		shaper_profile_id = tm_node->params.shaper_profile_id;
		shaper_profile =
			nix_tm_shaper_profile_search(dev, shaper_profile_id);
		if (shaper_profile)
			shaper_profile->reference_count--;

		TAILQ_REMOVE(&dev->node_list, tm_node, node);
		rte_free(tm_node);
	}

	if (!flags_mask) {
		/* Free all hw resources */
		req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
		req->flags = TXSCHQ_FREE_ALL;

		return otx2_mbox_process(mbox);
	}

	return rc;
}

static uint8_t
nix_tm_copy_rsp_to_dev(struct otx2_eth_dev *dev,
		       struct nix_txsch_alloc_rsp *rsp)
{
	uint16_t schq;
	uint8_t lvl;

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) {
			dev->txschq_list[lvl][schq] = rsp->schq_list[lvl][schq];
			dev->txschq_contig_list[lvl][schq] =
				rsp->schq_contig_list[lvl][schq];
		}

		dev->txschq[lvl] = rsp->schq[lvl];
		dev->txschq_contig[lvl] = rsp->schq_contig[lvl];
	}
	return 0;
}

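/*
 * Pick a hw scheduler queue for a node from the alloc response: RR
 * children draw from the non-contiguous list, strict-priority children
 * take contiguous entries indexed by priority, and root nodes are
 * handled specially (a TL2 root also records the TL1 hw id as its
 * parent for config purposes).
 */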
static int
nix_tm_assign_id_to_node(struct otx2_eth_dev *dev,
			 struct otx2_nix_tm_node *child,
			 struct otx2_nix_tm_node *parent)
{
	uint32_t hw_id, schq_con_index, prio_offset;
	uint32_t l_id, schq_index;

	otx2_tm_dbg("Assign hw id for child node %u, lvl %u, hw_lvl %u (%p)",
		    child->id, child->level_id, child->hw_lvl_id, child);

	child->flags |= NIX_TM_NODE_HWRES;

	/* Process root nodes */
	if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
	    child->hw_lvl_id == dev->otx2_tm_root_lvl && !parent) {
		int idx = 0;
		uint32_t tschq_con_index;

		l_id = child->hw_lvl_id;
		tschq_con_index = dev->txschq_contig_index[l_id];
		hw_id = dev->txschq_contig_list[l_id][tschq_con_index];
		child->hw_id = hw_id;
		dev->txschq_contig_index[l_id]++;
		/* Update TL1 hw_id for its parent for config purpose */
		idx = dev->txschq_index[NIX_TXSCH_LVL_TL1]++;
		hw_id = dev->txschq_list[NIX_TXSCH_LVL_TL1][idx];
		child->parent_hw_id = hw_id;
		return 0;
	}
	if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL1 &&
	    child->hw_lvl_id == dev->otx2_tm_root_lvl && !parent) {
		uint32_t tschq_con_index;

		l_id = child->hw_lvl_id;
		tschq_con_index = dev->txschq_index[l_id];
		hw_id = dev->txschq_list[l_id][tschq_con_index];
		child->hw_id = hw_id;
		dev->txschq_index[l_id]++;
		return 0;
	}

	/* Process children with parents */
	l_id = child->hw_lvl_id;
	schq_index = dev->txschq_index[l_id];
	schq_con_index = dev->txschq_contig_index[l_id];

	if (child->priority == parent->rr_prio) {
		hw_id = dev->txschq_list[l_id][schq_index];
		child->hw_id = hw_id;
		child->parent_hw_id = parent->hw_id;
		dev->txschq_index[l_id]++;
	} else {
		prio_offset = schq_con_index + child->priority;
		hw_id = dev->txschq_contig_list[l_id][prio_offset];
		child->hw_id = hw_id;
	}
	return 0;
}

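/* Assign hw ids level by level from the top of the hierarchy (TL1) down */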
static int
nix_tm_assign_hw_id(struct otx2_eth_dev *dev)
{
	struct otx2_nix_tm_node *parent, *child;
	uint32_t child_hw_lvl, con_index_inc, i;

	for (i = NIX_TXSCH_LVL_TL1; i > 0; i--) {
		TAILQ_FOREACH(parent, &dev->node_list, node) {
			child_hw_lvl = parent->hw_lvl_id - 1;
			if (parent->hw_lvl_id != i)
				continue;
			TAILQ_FOREACH(child, &dev->node_list, node) {
				if (!child->parent)
					continue;
				if (child->parent->id != parent->id)
					continue;
				nix_tm_assign_id_to_node(dev, child, parent);
			}

			con_index_inc = parent->max_prio + 1;
			dev->txschq_contig_index[child_hw_lvl] += con_index_inc;

			/*
			 * Explicitly assign id to parent node if it
			 * doesn't have a parent
			 */
			if (parent->hw_lvl_id == dev->otx2_tm_root_lvl)
				nix_tm_assign_id_to_node(dev, parent, NULL);
		}
	}
	return 0;
}

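/*
 * Accumulate the schedulers needed one level below each node: rr_num
 * from the plain list plus max_prio + 1 contiguous entries. Exactly
 * one TL1 is requested and TL1 has no contiguous allocation.
 */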
static uint8_t
nix_tm_count_req_schq(struct otx2_eth_dev *dev,
		      struct nix_txsch_alloc_req *req, uint8_t lvl)
{
	struct otx2_nix_tm_node *tm_node;
	uint8_t contig_count;

	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
		if (lvl == tm_node->hw_lvl_id) {
			req->schq[lvl - 1] += tm_node->rr_num;
			if (tm_node->max_prio != UINT32_MAX) {
				contig_count = tm_node->max_prio + 1;
				req->schq_contig[lvl - 1] += contig_count;
			}
		}
		if (lvl == dev->otx2_tm_root_lvl &&
		    dev->otx2_tm_root_lvl && lvl == NIX_TXSCH_LVL_TL2 &&
		    tm_node->hw_lvl_id == dev->otx2_tm_root_lvl) {
			req->schq_contig[dev->otx2_tm_root_lvl]++;
		}
	}

	req->schq[NIX_TXSCH_LVL_TL1] = 1;
	req->schq_contig[NIX_TXSCH_LVL_TL1] = 0;

	return 0;
}

static int
nix_tm_prepare_txschq_req(struct otx2_eth_dev *dev,
			  struct nix_txsch_alloc_req *req)
{
	uint8_t i;

	for (i = NIX_TXSCH_LVL_TL1; i > 0; i--)
		nix_tm_count_req_schq(dev, req, i);

	for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
		dev->txschq_index[i] = 0;
		dev->txschq_contig_index[i] = 0;
	}
	return 0;
}

static int
nix_tm_send_txsch_alloc_msg(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	int rc;

	req = otx2_mbox_alloc_msg_nix_txsch_alloc(mbox);

	rc = nix_tm_prepare_txschq_req(dev, req);
	if (rc)
		return rc;

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	nix_tm_copy_rsp_to_dev(dev, rsp);
	dev->link_cfg_lvl = rsp->link_cfg_lvl;

	nix_tm_assign_hw_id(dev);
	return 0;
}

static int
nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_nix_tm_node *tm_node;
	uint16_t sq, smq, rr_quantum;
	struct otx2_eth_txq *txq;
	int rc;

	nix_tm_update_parent_info(dev);

	rc = nix_tm_send_txsch_alloc_msg(dev);
	if (rc) {
		otx2_err("TM failed to alloc tm resources, rc=%d", rc);
		return rc;
	}

	rc = nix_tm_txsch_reg_config(dev);
	if (rc) {
		otx2_err("TM failed to configure sched registers, rc=%d", rc);
		return rc;
	}

	/* Enable xmit as all the topology is ready */
	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
		if (tm_node->flags & NIX_TM_NODE_ENABLED)
			continue;

		/* Enable xmit on sq */
		if (tm_node->level_id != OTX2_TM_LVL_QUEUE) {
			tm_node->flags |= NIX_TM_NODE_ENABLED;
			continue;
		}

		/* Don't enable SMQ or mark the node enabled */
		if (!xmit_enable)
			continue;

		sq = tm_node->id;
		if (sq >= eth_dev->data->nb_tx_queues) {
			rc = -EFAULT;
			break;
		}

		txq = eth_dev->data->tx_queues[sq];

		smq = tm_node->parent->hw_id;
		rr_quantum = (tm_node->weight *
			      NIX_TM_RR_QUANTUM_MAX) / MAX_SCHED_WEIGHT;

		rc = nix_tm_sw_xon(txq, smq, rr_quantum);
		if (rc)
			break;
		tm_node->flags |= NIX_TM_NODE_ENABLED;
	}

	if (rc)
		otx2_err("TM failed to enable xmit on sq %u, rc=%d", sq, rc);

	return rc;
}

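/*
 * Build the default hierarchy: a root at TL1 (PF with TL1 access) or
 * TL2, a single internal node per level below it, and one leaf per Tx
 * queue, all with the default RR weight.
 */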
static int
nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint32_t def = eth_dev->data->nb_tx_queues;
	struct rte_tm_node_params params;
	uint32_t leaf_parent, i;
	int rc = 0;

	/* Default params */
	memset(&params, 0, sizeof(params));
	params.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;

	if (nix_tm_have_tl1_access(dev)) {
		dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
		rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
					     DEFAULT_RR_WEIGHT,
					     NIX_TXSCH_LVL_TL1,
					     OTX2_TM_LVL_ROOT, false, &params);
		if (rc)
			goto exit;
		rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
					     DEFAULT_RR_WEIGHT,
					     NIX_TXSCH_LVL_TL2,
					     OTX2_TM_LVL_SCH1, false, &params);
		if (rc)
			goto exit;

		rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
					     DEFAULT_RR_WEIGHT,
					     NIX_TXSCH_LVL_TL3,
					     OTX2_TM_LVL_SCH2, false, &params);
		if (rc)
			goto exit;

		rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
					     DEFAULT_RR_WEIGHT,
					     NIX_TXSCH_LVL_TL4,
					     OTX2_TM_LVL_SCH3, false, &params);
		if (rc)
			goto exit;

		rc = nix_tm_node_add_to_list(dev, def + 4, def + 3, 0,
					     DEFAULT_RR_WEIGHT,
					     NIX_TXSCH_LVL_SMQ,
					     OTX2_TM_LVL_SCH4, false, &params);
		if (rc)
			goto exit;

		leaf_parent = def + 4;
	} else {
		dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
		rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
					     DEFAULT_RR_WEIGHT,
					     NIX_TXSCH_LVL_TL2,
					     OTX2_TM_LVL_ROOT, false, &params);
		if (rc)
			goto exit;

		rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
					     DEFAULT_RR_WEIGHT,
					     NIX_TXSCH_LVL_TL3,
					     OTX2_TM_LVL_SCH1, false, &params);
		if (rc)
			goto exit;

		rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
					     DEFAULT_RR_WEIGHT,
					     NIX_TXSCH_LVL_TL4,
					     OTX2_TM_LVL_SCH2, false, &params);
		if (rc)
			goto exit;

		rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
					     DEFAULT_RR_WEIGHT,
					     NIX_TXSCH_LVL_SMQ,
					     OTX2_TM_LVL_SCH3, false, &params);
		if (rc)
			goto exit;

		leaf_parent = def + 3;
	}

	/* Add leaf nodes */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		rc = nix_tm_node_add_to_list(dev, i, leaf_parent, 0,
					     DEFAULT_RR_WEIGHT,
					     NIX_TXSCH_LVL_CNT,
					     OTX2_TM_LVL_QUEUE, false, &params);
		if (rc)
			break;
	}

exit:
	return rc;
}

void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	TAILQ_INIT(&dev->node_list);
	TAILQ_INIT(&dev->shaper_profile_list);
}

int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
	int rc;

	/* Free up all resources already held */
	rc = nix_tm_free_resources(dev, 0, 0, false);
	if (rc) {
		otx2_err("Failed to free up existing resources, rc=%d", rc);
		return rc;
	}

	/* Clear shaper profiles */
	nix_tm_clear_shaper_profiles(dev);
	dev->tm_flags = NIX_TM_DEFAULT_TREE;

	rc = nix_tm_prepare_default_tree(eth_dev);
	if (rc != 0)
		return rc;

	rc = nix_tm_alloc_resources(eth_dev, false);
	if (rc != 0)
		return rc;
	dev->tm_leaf_cnt = sq_cnt;

	return 0;
}

int
otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	int rc;

	/* Xmit is assumed to be disabled */
	/* Free up resources already held */
	rc = nix_tm_free_resources(dev, 0, 0, false);
	if (rc) {
		otx2_err("Failed to free up existing resources, rc=%d", rc);
		return rc;
	}

	/* Clear shaper profiles */
	nix_tm_clear_shaper_profiles(dev);

	dev->tm_flags = 0;
	return 0;
}

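/*
 * Look up a leaf SQ's SMQ and RR quantum for queue start; also clears
 * SMQ xoff so transmission can resume on that scheduler.
 */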
int
otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
			  uint32_t *rr_quantum, uint16_t *smq)
{
	struct otx2_nix_tm_node *tm_node;
	int rc;

	/* 0..sq_cnt-1 are leaf nodes */
	if (sq >= dev->tm_leaf_cnt)
		return -EINVAL;

	/* Search for internal node first */
	tm_node = nix_tm_node_search(dev, sq, false);
	if (!tm_node)
		tm_node = nix_tm_node_search(dev, sq, true);

	/* Check if we found a valid leaf node */
	if (!tm_node || tm_node->level_id != OTX2_TM_LVL_QUEUE ||
	    !tm_node->parent || tm_node->parent->hw_id == UINT32_MAX) {
		return -EIO;
	}

	/* Get SMQ Id of leaf node's parent */
	*smq = tm_node->parent->hw_id;
	*rr_quantum = (tm_node->weight * NIX_TM_RR_QUANTUM_MAX)
		/ MAX_SCHED_WEIGHT;

	rc = nix_smq_xoff(dev, *smq, false);
	if (rc)
		return rc;
	tm_node->flags |= NIX_TM_NODE_ENABLED;

	return 0;
}