net/cnxk: allow FC on LBK and enable TM BP on Rx pause
dpdk.git: drivers/net/cnxk/cnxk_tm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include <cnxk_ethdev.h>
#include <cnxk_tm.h>
#include <cnxk_utils.h>

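/* Report whether a TM node is a leaf (an SQ) or a non-leaf scheduler
 * node.
 */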
static int
cnxk_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
                          int *is_leaf, struct rte_tm_error *error)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        struct roc_nix_tm_node *node;

        if (is_leaf == NULL) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                return -EINVAL;
        }

        node = roc_nix_tm_node_get(nix, node_id);
        if (node_id == RTE_TM_NODE_ID_NULL || !node) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                return -EINVAL;
        }

        if (roc_nix_tm_lvl_is_leaf(nix, node->lvl))
                *is_leaf = true;
        else
                *is_leaf = false;

        return 0;
}

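/* Fill device-wide TM capabilities: node counts derived from the free
 * scheduler resources, shaper rate bounds (the internal bit-per-second
 * limits divided by 8 for rte_tm's byte-based rates) and the supported
 * scheduling, stats and dynamic-update features.
 */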
static int
cnxk_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
                     struct rte_tm_capabilities *cap,
                     struct rte_tm_error *error)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        int rc, max_nr_nodes = 0, i, n_lvl;
        struct roc_nix *nix = &dev->nix;
        uint16_t schq[ROC_TM_LVL_MAX];

        memset(cap, 0, sizeof(*cap));

        rc = roc_nix_tm_rsrc_count(nix, schq);
        if (rc) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "unexpected fatal error";
                return rc;
        }

        for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
                max_nr_nodes += schq[i];

        cap->n_nodes_max = max_nr_nodes + dev->nb_txq;

        n_lvl = roc_nix_tm_lvl_cnt_get(nix);
        /* Consider leaf level */
        cap->n_levels_max = n_lvl + 1;
        cap->non_leaf_nodes_identical = 1;
        cap->leaf_nodes_identical = 1;

        /* Shaper Capabilities */
        cap->shaper_private_n_max = max_nr_nodes;
        cap->shaper_n_max = max_nr_nodes;
        cap->shaper_private_dual_rate_n_max = max_nr_nodes;
        cap->shaper_private_rate_min = NIX_TM_MIN_SHAPER_RATE / 8;
        cap->shaper_private_rate_max = NIX_TM_MAX_SHAPER_RATE / 8;
        cap->shaper_private_packet_mode_supported = 1;
        cap->shaper_private_byte_mode_supported = 1;
        cap->shaper_pkt_length_adjust_min = NIX_TM_LENGTH_ADJUST_MIN;
        cap->shaper_pkt_length_adjust_max = NIX_TM_LENGTH_ADJUST_MAX;

        /* Schedule Capabilities */
        cap->sched_n_children_max = schq[n_lvl - 1];
        cap->sched_sp_n_priorities_max = NIX_TM_TLX_SP_PRIO_MAX;
        cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
        cap->sched_wfq_n_groups_max = 1;
        cap->sched_wfq_weight_max = roc_nix_tm_max_sched_wt_get();
        cap->sched_wfq_packet_mode_supported = 1;
        cap->sched_wfq_byte_mode_supported = 1;

        cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
                                   RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
        cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES |
                          RTE_TM_STATS_N_PKTS_RED_DROPPED |
                          RTE_TM_STATS_N_BYTES_RED_DROPPED;

        for (i = 0; i < RTE_COLORS; i++) {
                cap->mark_vlan_dei_supported[i] = false;
                cap->mark_ip_ecn_tcp_supported[i] = false;
                cap->mark_ip_dscp_supported[i] = false;
        }

        return 0;
}

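/* Fill per-level TM capabilities: the leaf level (Tx queues), the root
 * level (TL1 on a PF, TL2 on a VF) and the intermediate scheduler
 * levels.
 */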
static int
cnxk_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
                           struct rte_tm_level_capabilities *cap,
                           struct rte_tm_error *error)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        uint16_t schq[ROC_TM_LVL_MAX];
        int rc, n_lvl;

        memset(cap, 0, sizeof(*cap));

        rc = roc_nix_tm_rsrc_count(nix, schq);
        if (rc) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "unexpected fatal error";
                return rc;
        }

        n_lvl = roc_nix_tm_lvl_cnt_get(nix);

        if (roc_nix_tm_lvl_is_leaf(nix, lvl)) {
                /* Leaf */
                cap->n_nodes_max = dev->nb_txq;
                cap->n_nodes_leaf_max = dev->nb_txq;
                cap->leaf_nodes_identical = 1;
                cap->leaf.stats_mask =
                        RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;

        } else if (lvl == ROC_TM_LVL_ROOT) {
                /* Root node, a.k.a. TL2(vf)/TL1(pf) */
                cap->n_nodes_max = 1;
                cap->n_nodes_nonleaf_max = 1;
                cap->non_leaf_nodes_identical = 1;

                cap->nonleaf.shaper_private_supported = true;
                cap->nonleaf.shaper_private_dual_rate_supported =
                        !roc_nix_tm_lvl_have_link_access(nix, lvl);
                cap->nonleaf.shaper_private_rate_min =
                        NIX_TM_MIN_SHAPER_RATE / 8;
                cap->nonleaf.shaper_private_rate_max =
                        NIX_TM_MAX_SHAPER_RATE / 8;
                cap->nonleaf.shaper_private_packet_mode_supported = 1;
                cap->nonleaf.shaper_private_byte_mode_supported = 1;

                cap->nonleaf.sched_n_children_max = schq[lvl];
                cap->nonleaf.sched_sp_n_priorities_max =
                        roc_nix_tm_max_prio(nix, lvl) + 1;
                cap->nonleaf.sched_wfq_n_groups_max = 1;
                cap->nonleaf.sched_wfq_weight_max =
                        roc_nix_tm_max_sched_wt_get();
                cap->nonleaf.sched_wfq_packet_mode_supported = 1;
                cap->nonleaf.sched_wfq_byte_mode_supported = 1;

                if (roc_nix_tm_lvl_have_link_access(nix, lvl))
                        cap->nonleaf.stats_mask =
                                RTE_TM_STATS_N_PKTS_RED_DROPPED |
                                RTE_TM_STATS_N_BYTES_RED_DROPPED;
        } else if (lvl < ROC_TM_LVL_MAX) {
                /* TL2, TL3, TL4, MDQ */
                cap->n_nodes_max = schq[lvl];
                cap->n_nodes_nonleaf_max = cap->n_nodes_max;
                cap->non_leaf_nodes_identical = 1;

                cap->nonleaf.shaper_private_supported = true;
                cap->nonleaf.shaper_private_dual_rate_supported = true;
                cap->nonleaf.shaper_private_rate_min =
                        NIX_TM_MIN_SHAPER_RATE / 8;
                cap->nonleaf.shaper_private_rate_max =
                        NIX_TM_MAX_SHAPER_RATE / 8;
                cap->nonleaf.shaper_private_packet_mode_supported = 1;
                cap->nonleaf.shaper_private_byte_mode_supported = 1;

                /* The lowest non-leaf level is MDQ, whose children are
                 * the Tx queues; MDQ itself doesn't support strict
                 * priority.
                 */
                if ((int)lvl == (n_lvl - 1))
                        cap->nonleaf.sched_n_children_max = dev->nb_txq;
                else
                        cap->nonleaf.sched_n_children_max = schq[lvl - 1];
                cap->nonleaf.sched_sp_n_priorities_max =
                        roc_nix_tm_max_prio(nix, lvl) + 1;
                cap->nonleaf.sched_wfq_n_groups_max = 1;
                cap->nonleaf.sched_wfq_weight_max =
                        roc_nix_tm_max_sched_wt_get();
                cap->nonleaf.sched_wfq_packet_mode_supported = 1;
                cap->nonleaf.sched_wfq_byte_mode_supported = 1;
        } else {
                /* Unsupported level; rc is 0 here, so return an
                 * explicit error instead of success.
                 */
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "unsupported level";
                return -EINVAL;
        }
        return 0;
}

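/* Fill the capabilities of a single node, looked up by node id. */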
static int
cnxk_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
                          struct rte_tm_node_capabilities *cap,
                          struct rte_tm_error *error)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct cnxk_nix_tm_node *tm_node;
        struct roc_nix *nix = &dev->nix;
        uint16_t schq[ROC_TM_LVL_MAX];
        int rc, n_lvl, lvl;

        memset(cap, 0, sizeof(*cap));

        tm_node = (struct cnxk_nix_tm_node *)roc_nix_tm_node_get(nix, node_id);
        if (!tm_node) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        lvl = tm_node->nix_node.lvl;
        n_lvl = roc_nix_tm_lvl_cnt_get(nix);

        /* Leaf node */
        if (roc_nix_tm_lvl_is_leaf(nix, lvl)) {
                cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
                return 0;
        }

        rc = roc_nix_tm_rsrc_count(nix, schq);
        if (rc) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "unexpected fatal error";
                return rc;
        }

        /* Non Leaf Shaper */
        cap->shaper_private_supported = true;
        cap->shaper_private_rate_min = NIX_TM_MIN_SHAPER_RATE / 8;
        cap->shaper_private_rate_max = NIX_TM_MAX_SHAPER_RATE / 8;
        cap->shaper_private_packet_mode_supported = 1;
        cap->shaper_private_byte_mode_supported = 1;

        /* Non Leaf Scheduler */
        if (lvl == (n_lvl - 1))
                cap->nonleaf.sched_n_children_max = dev->nb_txq;
        else
                cap->nonleaf.sched_n_children_max = schq[lvl - 1];

        cap->nonleaf.sched_sp_n_priorities_max =
                roc_nix_tm_max_prio(nix, lvl) + 1;
        cap->nonleaf.sched_wfq_n_children_per_group_max =
                cap->nonleaf.sched_n_children_max;
        cap->nonleaf.sched_wfq_n_groups_max = 1;
        cap->nonleaf.sched_wfq_weight_max = roc_nix_tm_max_sched_wt_get();
        cap->nonleaf.sched_wfq_packet_mode_supported = 1;
        cap->nonleaf.sched_wfq_byte_mode_supported = 1;

        cap->shaper_private_dual_rate_supported = true;
        if (roc_nix_tm_lvl_have_link_access(nix, lvl)) {
                cap->shaper_private_dual_rate_supported = false;
                cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
                                  RTE_TM_STATS_N_BYTES_RED_DROPPED;
        }

        return 0;
}

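/* Create a shaper profile from rte_tm parameters. rte_tm rates and
 * burst sizes are byte-based, so byte-mode profiles are converted to
 * bit units for the ROC layer.
 */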
static int
cnxk_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev, uint32_t id,
                               struct rte_tm_shaper_params *params,
                               struct rte_tm_error *error)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct cnxk_nix_tm_shaper_profile *profile;
        struct roc_nix *nix = &dev->nix;
        int rc;

        if (roc_nix_tm_shaper_profile_get(nix, id)) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
                error->message = "shaper profile ID already exists";
                return -EINVAL;
        }

        profile = rte_zmalloc("cnxk_nix_tm_shaper_profile",
                              sizeof(struct cnxk_nix_tm_shaper_profile), 0);
        if (!profile)
                return -ENOMEM;
        profile->profile.id = id;
        profile->profile.commit_rate = params->committed.rate;
        profile->profile.peak_rate = params->peak.rate;
        profile->profile.commit_sz = params->committed.size;
        profile->profile.peak_sz = params->peak.size;
        /* If byte mode, convert rates to bps and burst sizes to bits */
        if (!params->packet_mode) {
                profile->profile.commit_rate *= 8;
                profile->profile.peak_rate *= 8;
                profile->profile.commit_sz *= 8;
                profile->profile.peak_sz *= 8;
        }
        profile->profile.pkt_len_adj = params->pkt_length_adjust;
        profile->profile.pkt_mode = params->packet_mode;
        profile->profile.free_fn = rte_free;
        rte_memcpy(&profile->params, params,
                   sizeof(struct rte_tm_shaper_params));

        rc = roc_nix_tm_shaper_profile_add(nix, &profile->profile);

        /* fill error information based on return value */
        if (rc) {
                error->type = roc_nix_tm_err_to_rte_err(rc);
                error->message = roc_error_msg_get(rc);
        }

        return rc;
}

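/* Remove a previously added shaper profile by its id. */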
static int
cnxk_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
                                  uint32_t profile_id,
                                  struct rte_tm_error *error)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        rc = roc_nix_tm_shaper_profile_delete(nix, profile_id);
        if (rc) {
                error->type = roc_nix_tm_err_to_rte_err(rc);
                error->message = roc_error_msg_get(rc);
        }

        return rc;
}

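/* Add a node to the hierarchy being built; rejected once a user
 * hierarchy has been committed. With RTE_TM_NODE_LEVEL_ID_ANY the
 * level is inferred from the parent, and packet mode is taken from
 * the shaper profile or the node's WFQ weight mode.
 */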
static int
cnxk_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
                     uint32_t parent_node_id, uint32_t priority,
                     uint32_t weight, uint32_t lvl,
                     struct rte_tm_node_params *params,
                     struct rte_tm_error *error)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix_tm_shaper_profile *profile;
        struct roc_nix_tm_node *parent_node;
        struct roc_nix *nix = &dev->nix;
        struct cnxk_nix_tm_node *node;
        int rc;

        /* we don't support dynamic updates */
        if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
                error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
                error->message = "dynamic update not supported";
                return -EIO;
        }

        parent_node = roc_nix_tm_node_get(nix, parent_node_id);
        /* find the right level */
        if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
                if (parent_node_id == RTE_TM_NODE_ID_NULL) {
                        lvl = ROC_TM_LVL_ROOT;
                } else if (parent_node) {
                        lvl = parent_node->lvl + 1;
                } else {
                        /* Neither proper parent nor proper level id given */
                        error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
                        error->message = "invalid parent node id";
                        return -ERANGE;
                }
        }

        node = rte_zmalloc("cnxk_nix_tm_node", sizeof(struct cnxk_nix_tm_node),
                           0);
        if (!node)
                return -ENOMEM;

        rte_memcpy(&node->params, params, sizeof(struct rte_tm_node_params));

        node->nix_node.id = node_id;
        node->nix_node.parent_id = parent_node_id;
        node->nix_node.priority = priority;
        node->nix_node.weight = weight;
        node->nix_node.lvl = lvl;
        node->nix_node.shaper_profile_id = params->shaper_profile_id;

        profile = roc_nix_tm_shaper_profile_get(nix, params->shaper_profile_id);
        /* Packet mode */
        if (!roc_nix_tm_lvl_is_leaf(nix, lvl) &&
            ((profile && profile->pkt_mode) ||
             (params->nonleaf.wfq_weight_mode &&
              params->nonleaf.n_sp_priorities &&
              !params->nonleaf.wfq_weight_mode[0])))
                node->nix_node.pkt_mode = 1;

        rc = roc_nix_tm_node_add(nix, &node->nix_node);
        if (rc < 0) {
                error->type = roc_nix_tm_err_to_rte_err(rc);
                error->message = roc_error_msg_get(rc);
                rte_free(node);
                return rc;
        }
        error->type = RTE_TM_ERROR_TYPE_NONE;
        roc_nix_tm_shaper_default_red_algo(&node->nix_node, profile);

        return 0;
}

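/* Delete a node; like node_add, rejected after hierarchy commit. */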
static int
cnxk_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id,
                        struct rte_tm_error *error)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        struct cnxk_nix_tm_node *node;
        int rc;

        /* we don't support dynamic updates yet */
        if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
                error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
                error->message = "hierarchy exists";
                return -EIO;
        }

        if (node_id == RTE_TM_NODE_ID_NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid node id";
                return -EINVAL;
        }

        node = (struct cnxk_nix_tm_node *)roc_nix_tm_node_get(nix, node_id);

        rc = roc_nix_tm_node_delete(nix, node_id, 0);
        if (rc) {
                error->type = roc_nix_tm_err_to_rte_err(rc);
                error->message = roc_error_msg_get(rc);
        } else {
                rte_free(node);
        }

        return rc;
}

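/* Suspend scheduling through a node. */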
static int
cnxk_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id,
                         struct rte_tm_error *error)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        int rc;

        rc = roc_nix_tm_node_suspend_resume(&dev->nix, node_id, true);
        if (rc) {
                error->type = roc_nix_tm_err_to_rte_err(rc);
                error->message = roc_error_msg_get(rc);
        }

        return rc;
}

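/* Resume a previously suspended node. */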
static int
cnxk_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
                        struct rte_tm_error *error)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        int rc;

        rc = roc_nix_tm_node_suspend_resume(&dev->nix, node_id, false);
        if (rc) {
                error->type = roc_nix_tm_err_to_rte_err(rc);
                error->message = roc_error_msg_get(rc);
        }

        return rc;
}

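/* Commit the user-built hierarchy: all Tx queues must be attached as
 * leaves before the existing hardware hierarchy is torn down and
 * re-enabled with the user topology.
 */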
static int
cnxk_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
                             int clear_on_fail __rte_unused,
                             struct rte_tm_error *error)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "hierarchy exists";
                return -EIO;
        }

        if (roc_nix_tm_leaf_cnt(nix) < dev->nb_txq) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "incomplete hierarchy";
                return -EINVAL;
        }

        rc = roc_nix_tm_hierarchy_disable(nix);
        if (rc) {
                error->type = roc_nix_tm_err_to_rte_err(rc);
                error->message = roc_error_msg_get(rc);
                return -EIO;
        }

        rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_USER, true);
        if (rc) {
                error->type = roc_nix_tm_err_to_rte_err(rc);
                error->message = roc_error_msg_get(rc);
                return -EIO;
        }
        error->type = RTE_TM_ERROR_TYPE_NONE;

        return 0;
}

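/* Attach a different shaper profile to a node and refresh the default
 * RED algorithm derived from that profile.
 */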
static int
cnxk_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev, uint32_t node_id,
                               uint32_t profile_id, struct rte_tm_error *error)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix_tm_shaper_profile *profile;
        struct roc_nix *nix = &dev->nix;
        struct roc_nix_tm_node *node;
        int rc;

        rc = roc_nix_tm_node_shaper_update(nix, node_id, profile_id, false);
        if (rc) {
                error->type = roc_nix_tm_err_to_rte_err(rc);
                error->message = roc_error_msg_get(rc);
                return -EINVAL;
        }
        node = roc_nix_tm_node_get(nix, node_id);
        if (!node)
                return -EINVAL;

        profile = roc_nix_tm_shaper_profile_get(nix, profile_id);
        roc_nix_tm_shaper_default_red_algo(node, profile);

        return 0;
}

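/* Move a node under a new parent with the given priority and weight. */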
static int
cnxk_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev, uint32_t node_id,
                               uint32_t new_parent_id, uint32_t priority,
                               uint32_t weight, struct rte_tm_error *error)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        rc = roc_nix_tm_node_parent_update(nix, node_id, new_parent_id,
                                           priority, weight);
        if (rc) {
                error->type = roc_nix_tm_err_to_rte_err(rc);
                error->message = roc_error_msg_get(rc);
                return -EINVAL;
        }

        return 0;
}

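/* Read node statistics: Tx packet/byte counts for leaf nodes (from SQ
 * queue stats), RED-dropped packet/byte counts for non-leaf nodes.
 */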
static int
cnxk_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
                            struct rte_tm_node_stats *stats,
                            uint64_t *stats_mask, int clear,
                            struct rte_tm_error *error)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix_tm_node_stats nix_tm_stats;
        struct roc_nix *nix = &dev->nix;
        struct roc_nix_tm_node *node;
        int rc;

        node = roc_nix_tm_node_get(nix, node_id);
        if (!node) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        if (roc_nix_tm_lvl_is_leaf(nix, node->lvl)) {
                struct roc_nix_stats_queue qstats;

                rc = roc_nix_stats_queue_get(nix, node->id, 0, &qstats);
                if (!rc) {
                        stats->n_pkts = qstats.tx_pkts;
                        stats->n_bytes = qstats.tx_octs;
                        *stats_mask =
                                RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
                }
                goto exit;
        }

        rc = roc_nix_tm_node_stats_get(nix, node_id, clear, &nix_tm_stats);
        if (!rc) {
                stats->leaf.n_pkts_dropped[RTE_COLOR_RED] =
                        nix_tm_stats.stats[ROC_NIX_TM_NODE_PKTS_DROPPED];
                stats->leaf.n_bytes_dropped[RTE_COLOR_RED] =
                        nix_tm_stats.stats[ROC_NIX_TM_NODE_BYTES_DROPPED];
                *stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
                              RTE_TM_STATS_N_BYTES_RED_DROPPED;
        }

exit:
        if (rc) {
                error->type = roc_nix_tm_err_to_rte_err(rc);
                error->message = roc_error_msg_get(rc);
        }
        return rc;
}

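/* TM callbacks exposed to applications through the rte_tm API. */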
const struct rte_tm_ops cnxk_tm_ops = {
        .node_type_get = cnxk_nix_tm_node_type_get,
        .capabilities_get = cnxk_nix_tm_capa_get,
        .level_capabilities_get = cnxk_nix_tm_level_capa_get,
        .node_capabilities_get = cnxk_nix_tm_node_capa_get,

        .shaper_profile_add = cnxk_nix_tm_shaper_profile_add,
        .shaper_profile_delete = cnxk_nix_tm_shaper_profile_delete,

        .node_add = cnxk_nix_tm_node_add,
        .node_delete = cnxk_nix_tm_node_delete,
        .node_suspend = cnxk_nix_tm_node_suspend,
        .node_resume = cnxk_nix_tm_node_resume,
        .hierarchy_commit = cnxk_nix_tm_hierarchy_commit,

        .node_shaper_update = cnxk_nix_tm_node_shaper_update,
        .node_parent_update = cnxk_nix_tm_node_parent_update,
        .node_stats_read = cnxk_nix_tm_node_stats_read,
};

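/* Hand the TM ops table to the rte_tm layer; TM is unsupported on
 * CN96xx Ax and CN95xx A0 steppings. Applications then reach these
 * callbacks through the generic rte_tm API, e.g. (illustrative
 * sketch only):
 *
 *      struct rte_tm_capabilities cap;
 *      struct rte_tm_error tm_err;
 *
 *      if (rte_tm_capabilities_get(port_id, &cap, &tm_err) == 0)
 *              printf("max nodes: %u\n", cap.n_nodes_max);
 */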
int
cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev __rte_unused, void *arg)
{
        if (!arg)
                return -EINVAL;

        /* Check for supported revisions */
        if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
                return -EINVAL;

        *(const void **)arg = &cnxk_tm_ops;

        return 0;
}

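/* Rate-limit a single Tx queue to tx_rate_mbps. If the rate-limit
 * topology is not yet active and multiple Tx queues exist, the
 * hierarchy is first rebuilt as a rate-limited tree before applying
 * the per-SQ limit.
 */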
int
cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
                                 uint16_t queue_idx, uint16_t tx_rate_mbps)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
        struct roc_nix *nix = &dev->nix;
        int rc = -EINVAL;

        /* Check for supported revisions */
        if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
                goto exit;

        if (queue_idx >= eth_dev->data->nb_tx_queues)
                goto exit;

        if ((roc_nix_tm_tree_type_get(nix) != ROC_NIX_TM_RLIMIT) &&
            eth_dev->data->nb_tx_queues > 1) {
                /*
                 * Transmit is disabled here and re-enabled once the
                 * new topology is in place.
                 */
                rc = roc_nix_tm_hierarchy_disable(nix);
                if (rc)
                        goto exit;

                rc = roc_nix_tm_prepare_rate_limited_tree(nix);
                if (rc)
                        goto exit;

                rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_RLIMIT, true);
                if (rc)
                        goto exit;
        }

        return roc_nix_tm_rlimit_sq(nix, queue_idx, tx_rate);
exit:
        return rc;
}