net/cnxk: disable default inner checksum for outbound inline
[dpdk.git] / drivers / net / cnxk / cnxk_tm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 #include <cnxk_ethdev.h>
5 #include <cnxk_tm.h>
6 #include <cnxk_utils.h>
7
8 static int
9 cnxk_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
10                           int *is_leaf, struct rte_tm_error *error)
11 {
12         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
13         struct roc_nix *nix = &dev->nix;
14         struct roc_nix_tm_node *node;
15
16         if (is_leaf == NULL) {
17                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
18                 return -EINVAL;
19         }
20
21         node = roc_nix_tm_node_get(nix, node_id);
22         if (node_id == RTE_TM_NODE_ID_NULL || !node) {
23                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
24                 return -EINVAL;
25         }
26
27         if (roc_nix_tm_lvl_is_leaf(nix, node->lvl))
28                 *is_leaf = true;
29         else
30                 *is_leaf = false;
31
32         return 0;
33 }
34
/* rte_tm capabilities_get() callback: fill @cap with the device-wide
 * traffic-manager capabilities derived from the free scheduler-queue
 * counts reported by the roc layer.
 */
static int
cnxk_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
		     struct rte_tm_capabilities *cap,
		     struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc, max_nr_nodes = 0, i, n_lvl;
	struct roc_nix *nix = &dev->nix;
	/* Per-level count of available scheduler queues (filled below) */
	uint16_t schq[ROC_TM_LVL_MAX];

	memset(cap, 0, sizeof(*cap));

	rc = roc_nix_tm_rsrc_count(nix, schq);
	if (rc) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "unexpected fatal error";
		return rc;
	}

	/* Sum non-leaf resources across all levels below TL1 */
	for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
		max_nr_nodes += schq[i];

	/* Leaf nodes are the Tx queues themselves */
	cap->n_nodes_max = max_nr_nodes + dev->nb_txq;

	n_lvl = roc_nix_tm_lvl_cnt_get(nix);
	/* Consider leaf level */
	cap->n_levels_max = n_lvl + 1;
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	/* Shaper Capabilities */
	cap->shaper_private_n_max = max_nr_nodes;
	cap->shaper_n_max = max_nr_nodes;
	cap->shaper_private_dual_rate_n_max = max_nr_nodes;
	/* roc rates are in bits/sec; rte_tm expects bytes/sec, hence / 8 */
	cap->shaper_private_rate_min = NIX_TM_MIN_SHAPER_RATE / 8;
	cap->shaper_private_rate_max = NIX_TM_MAX_SHAPER_RATE / 8;
	cap->shaper_private_packet_mode_supported = 1;
	cap->shaper_private_byte_mode_supported = 1;
	cap->shaper_pkt_length_adjust_min = NIX_TM_LENGTH_ADJUST_MIN;
	cap->shaper_pkt_length_adjust_max = NIX_TM_LENGTH_ADJUST_MAX;

	/* Schedule Capabilities */
	cap->sched_n_children_max = schq[n_lvl - 1];
	cap->sched_sp_n_priorities_max = NIX_TM_TLX_SP_PRIO_MAX;
	cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
	cap->sched_wfq_n_groups_max = 1;
	cap->sched_wfq_weight_max = roc_nix_tm_max_sched_wt_get();
	cap->sched_wfq_packet_mode_supported = 1;
	cap->sched_wfq_byte_mode_supported = 1;

	cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
				   RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
	cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES |
			  RTE_TM_STATS_N_PKTS_RED_DROPPED |
			  RTE_TM_STATS_N_BYTES_RED_DROPPED;

	/* Packet marking: supported for yellow/red colors only, never green */
	cap->mark_vlan_dei_supported[RTE_COLOR_GREEN] = false;
	cap->mark_ip_ecn_tcp_supported[RTE_COLOR_GREEN] = false;
	cap->mark_ip_ecn_sctp_supported[RTE_COLOR_GREEN] = false;
	cap->mark_ip_dscp_supported[RTE_COLOR_GREEN] = false;

	for (i = RTE_COLOR_YELLOW; i < RTE_COLORS; i++) {
		cap->mark_vlan_dei_supported[i] = true;
		cap->mark_ip_ecn_tcp_supported[i] = true;
		cap->mark_ip_ecn_sctp_supported[i] = true;
		cap->mark_ip_dscp_supported[i] = true;
	}

	return 0;
}
105
106 static int
107 cnxk_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
108                            struct rte_tm_level_capabilities *cap,
109                            struct rte_tm_error *error)
110 {
111         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
112         struct roc_nix *nix = &dev->nix;
113         uint16_t schq[ROC_TM_LVL_MAX];
114         int rc, n_lvl;
115
116         memset(cap, 0, sizeof(*cap));
117
118         rc = roc_nix_tm_rsrc_count(nix, schq);
119         if (rc) {
120                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
121                 error->message = "unexpected fatal error";
122                 return rc;
123         }
124
125         n_lvl = roc_nix_tm_lvl_cnt_get(nix);
126
127         if (roc_nix_tm_lvl_is_leaf(nix, lvl)) {
128                 /* Leaf */
129                 cap->n_nodes_max = dev->nb_txq;
130                 cap->n_nodes_leaf_max = dev->nb_txq;
131                 cap->leaf_nodes_identical = 1;
132                 cap->leaf.stats_mask =
133                         RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
134
135         } else if (lvl == ROC_TM_LVL_ROOT) {
136                 /* Root node, a.k.a. TL2(vf)/TL1(pf) */
137                 cap->n_nodes_max = 1;
138                 cap->n_nodes_nonleaf_max = 1;
139                 cap->non_leaf_nodes_identical = 1;
140
141                 cap->nonleaf.shaper_private_supported = true;
142                 cap->nonleaf.shaper_private_dual_rate_supported =
143                         roc_nix_tm_lvl_have_link_access(nix, lvl) ? false :
144                                                                     true;
145                 cap->nonleaf.shaper_private_rate_min =
146                         NIX_TM_MIN_SHAPER_RATE / 8;
147                 cap->nonleaf.shaper_private_rate_max =
148                         NIX_TM_MAX_SHAPER_RATE / 8;
149                 cap->nonleaf.shaper_private_packet_mode_supported = 1;
150                 cap->nonleaf.shaper_private_byte_mode_supported = 1;
151
152                 cap->nonleaf.sched_n_children_max = schq[lvl];
153                 cap->nonleaf.sched_sp_n_priorities_max =
154                         roc_nix_tm_max_prio(nix, lvl) + 1;
155                 cap->nonleaf.sched_wfq_n_groups_max = 1;
156                 cap->nonleaf.sched_wfq_weight_max =
157                         roc_nix_tm_max_sched_wt_get();
158                 cap->nonleaf.sched_wfq_packet_mode_supported = 1;
159                 cap->nonleaf.sched_wfq_byte_mode_supported = 1;
160
161                 if (roc_nix_tm_lvl_have_link_access(nix, lvl))
162                         cap->nonleaf.stats_mask =
163                                 RTE_TM_STATS_N_PKTS_RED_DROPPED |
164                                 RTE_TM_STATS_N_BYTES_RED_DROPPED;
165         } else if (lvl < ROC_TM_LVL_MAX) {
166                 /* TL2, TL3, TL4, MDQ */
167                 cap->n_nodes_max = schq[lvl];
168                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
169                 cap->non_leaf_nodes_identical = 1;
170
171                 cap->nonleaf.shaper_private_supported = true;
172                 cap->nonleaf.shaper_private_dual_rate_supported = true;
173                 cap->nonleaf.shaper_private_rate_min =
174                         NIX_TM_MIN_SHAPER_RATE / 8;
175                 cap->nonleaf.shaper_private_rate_max =
176                         NIX_TM_MAX_SHAPER_RATE / 8;
177                 cap->nonleaf.shaper_private_packet_mode_supported = 1;
178                 cap->nonleaf.shaper_private_byte_mode_supported = 1;
179
180                 /* MDQ doesn't support Strict Priority */
181                 if ((int)lvl == (n_lvl - 1))
182                         cap->nonleaf.sched_n_children_max = dev->nb_txq;
183                 else
184                         cap->nonleaf.sched_n_children_max = schq[lvl - 1];
185                 cap->nonleaf.sched_sp_n_priorities_max =
186                         roc_nix_tm_max_prio(nix, lvl) + 1;
187                 cap->nonleaf.sched_wfq_n_groups_max = 1;
188                 cap->nonleaf.sched_wfq_weight_max =
189                         roc_nix_tm_max_sched_wt_get();
190                 cap->nonleaf.sched_wfq_packet_mode_supported = 1;
191                 cap->nonleaf.sched_wfq_byte_mode_supported = 1;
192         } else {
193                 /* unsupported level */
194                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
195                 return rc;
196         }
197         return 0;
198 }
199
/* rte_tm node_capabilities_get() callback: fill @cap for the node
 * identified by @node_id, based on its level in the hierarchy.
 */
static int
cnxk_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
			  struct rte_tm_node_capabilities *cap,
			  struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_nix_tm_node *tm_node;
	struct roc_nix *nix = &dev->nix;
	/* Per-level count of available scheduler queues */
	uint16_t schq[ROC_TM_LVL_MAX];
	int rc, n_lvl, lvl;

	memset(cap, 0, sizeof(*cap));

	/* cnxk_nix_tm_node embeds roc_nix_tm_node as its first member,
	 * so the roc handle can be downcast safely.
	 */
	tm_node = (struct cnxk_nix_tm_node *)roc_nix_tm_node_get(nix, node_id);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	lvl = tm_node->nix_node.lvl;
	n_lvl = roc_nix_tm_lvl_cnt_get(nix);

	/* Leaf node: only queue counters are available */
	if (roc_nix_tm_lvl_is_leaf(nix, lvl)) {
		cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
		return 0;
	}

	rc = roc_nix_tm_rsrc_count(nix, schq);
	if (rc) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "unexpected fatal error";
		return rc;
	}

	/* Non Leaf Shaper */
	cap->shaper_private_supported = true;
	/* roc rates are bits/sec; rte_tm wants bytes/sec */
	cap->shaper_private_rate_min = NIX_TM_MIN_SHAPER_RATE / 8;
	cap->shaper_private_rate_max = NIX_TM_MAX_SHAPER_RATE / 8;
	cap->shaper_private_packet_mode_supported = 1;
	cap->shaper_private_byte_mode_supported = 1;

	/* Non Leaf Scheduler: the last non-leaf level feeds the Tx queues;
	 * other levels feed nodes of the next level down.
	 */
	if (lvl == (n_lvl - 1))
		cap->nonleaf.sched_n_children_max = dev->nb_txq;
	else
		cap->nonleaf.sched_n_children_max = schq[lvl - 1];

	cap->nonleaf.sched_sp_n_priorities_max =
		roc_nix_tm_max_prio(nix, lvl) + 1;
	cap->nonleaf.sched_wfq_n_children_per_group_max =
		cap->nonleaf.sched_n_children_max;
	cap->nonleaf.sched_wfq_n_groups_max = 1;
	cap->nonleaf.sched_wfq_weight_max = roc_nix_tm_max_sched_wt_get();
	cap->nonleaf.sched_wfq_packet_mode_supported = 1;
	cap->nonleaf.sched_wfq_byte_mode_supported = 1;

	/* Levels with link access: single-rate shaping, RED drop stats */
	cap->shaper_private_dual_rate_supported = true;
	if (roc_nix_tm_lvl_have_link_access(nix, lvl)) {
		cap->shaper_private_dual_rate_supported = false;
		cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
				  RTE_TM_STATS_N_BYTES_RED_DROPPED;
	}

	return 0;
}
267
268 static int
269 cnxk_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev, uint32_t id,
270                                struct rte_tm_shaper_params *params,
271                                struct rte_tm_error *error)
272 {
273         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
274         struct cnxk_nix_tm_shaper_profile *profile;
275         struct roc_nix *nix = &dev->nix;
276         int rc;
277
278         if (roc_nix_tm_shaper_profile_get(nix, id)) {
279                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
280                 error->message = "shaper profile ID exist";
281                 return -EINVAL;
282         }
283
284         profile = rte_zmalloc("cnxk_nix_tm_shaper_profile",
285                               sizeof(struct cnxk_nix_tm_shaper_profile), 0);
286         if (!profile)
287                 return -ENOMEM;
288         profile->profile.id = id;
289         profile->profile.commit_rate = params->committed.rate;
290         profile->profile.peak_rate = params->peak.rate;
291         profile->profile.commit_sz = params->committed.size;
292         profile->profile.peak_sz = params->peak.size;
293         /* If Byte mode, then convert to bps */
294         if (!params->packet_mode) {
295                 profile->profile.commit_rate *= 8;
296                 profile->profile.peak_rate *= 8;
297                 profile->profile.commit_sz *= 8;
298                 profile->profile.peak_sz *= 8;
299         }
300         profile->profile.pkt_len_adj = params->pkt_length_adjust;
301         profile->profile.pkt_mode = params->packet_mode;
302         profile->profile.free_fn = rte_free;
303         rte_memcpy(&profile->params, params,
304                    sizeof(struct rte_tm_shaper_params));
305
306         rc = roc_nix_tm_shaper_profile_add(nix, &profile->profile);
307
308         /* fill error information based on return value */
309         if (rc) {
310                 error->type = roc_nix_tm_err_to_rte_err(rc);
311                 error->message = roc_error_msg_get(rc);
312         }
313
314         return rc;
315 }
316
317 static int
318 cnxk_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
319                                   uint32_t profile_id,
320                                   struct rte_tm_error *error)
321 {
322         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
323         struct roc_nix *nix = &dev->nix;
324         int rc;
325
326         rc = roc_nix_tm_shaper_profile_delete(nix, profile_id);
327         if (rc) {
328                 error->type = roc_nix_tm_err_to_rte_err(rc);
329                 error->message = roc_error_msg_get(rc);
330         }
331
332         return rc;
333 }
334
335 static int
336 cnxk_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
337                      uint32_t parent_node_id, uint32_t priority,
338                      uint32_t weight, uint32_t lvl,
339                      struct rte_tm_node_params *params,
340                      struct rte_tm_error *error)
341 {
342         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
343         struct roc_nix_tm_shaper_profile *profile;
344         struct roc_nix_tm_node *parent_node;
345         struct roc_nix *nix = &dev->nix;
346         struct cnxk_nix_tm_node *node;
347         int rc;
348
349         /* we don't support dynamic updates */
350         if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
351                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
352                 error->message = "dynamic update not supported";
353                 return -EIO;
354         }
355
356         parent_node = roc_nix_tm_node_get(nix, parent_node_id);
357         /* find the right level */
358         if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
359                 if (parent_node_id == RTE_TM_NODE_ID_NULL) {
360                         lvl = ROC_TM_LVL_ROOT;
361                 } else if (parent_node) {
362                         lvl = parent_node->lvl + 1;
363                 } else {
364                         /* Neither proper parent nor proper level id given */
365                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
366                         error->message = "invalid parent node id";
367                         return -ERANGE;
368                 }
369         }
370
371         node = rte_zmalloc("cnxk_nix_tm_node", sizeof(struct cnxk_nix_tm_node),
372                            0);
373         if (!node)
374                 return -ENOMEM;
375
376         rte_memcpy(&node->params, params, sizeof(struct rte_tm_node_params));
377
378         node->nix_node.id = node_id;
379         node->nix_node.parent_id = parent_node_id;
380         node->nix_node.priority = priority;
381         node->nix_node.weight = weight;
382         node->nix_node.lvl = lvl;
383         node->nix_node.shaper_profile_id = params->shaper_profile_id;
384
385         profile = roc_nix_tm_shaper_profile_get(nix, params->shaper_profile_id);
386         /* Packet mode */
387         if (!roc_nix_tm_lvl_is_leaf(nix, lvl) &&
388             ((profile && profile->pkt_mode) ||
389              (params->nonleaf.wfq_weight_mode &&
390               params->nonleaf.n_sp_priorities &&
391               !params->nonleaf.wfq_weight_mode[0])))
392                 node->nix_node.pkt_mode = 1;
393
394         rc = roc_nix_tm_node_add(nix, &node->nix_node);
395         if (rc < 0) {
396                 error->type = roc_nix_tm_err_to_rte_err(rc);
397                 error->message = roc_error_msg_get(rc);
398                 return rc;
399         }
400         error->type = RTE_TM_ERROR_TYPE_NONE;
401         roc_nix_tm_shaper_default_red_algo(&node->nix_node, profile);
402
403         return 0;
404 }
405
406 static int
407 cnxk_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id,
408                         struct rte_tm_error *error)
409 {
410         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
411         struct roc_nix *nix = &dev->nix;
412         struct cnxk_nix_tm_node *node;
413         int rc;
414
415         /* we don't support dynamic updates yet */
416         if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
417                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
418                 error->message = "hierarchy exists";
419                 return -EIO;
420         }
421
422         if (node_id == RTE_TM_NODE_ID_NULL) {
423                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
424                 error->message = "invalid node id";
425                 return -EINVAL;
426         }
427
428         node = (struct cnxk_nix_tm_node *)roc_nix_tm_node_get(nix, node_id);
429
430         rc = roc_nix_tm_node_delete(nix, node_id, 0);
431         if (rc) {
432                 error->type = roc_nix_tm_err_to_rte_err(rc);
433                 error->message = roc_error_msg_get(rc);
434         } else {
435                 rte_free(node);
436         }
437
438         return rc;
439 }
440
441 static int
442 cnxk_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id,
443                          struct rte_tm_error *error)
444 {
445         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
446         int rc;
447
448         rc = roc_nix_tm_node_suspend_resume(&dev->nix, node_id, true);
449         if (rc) {
450                 error->type = roc_nix_tm_err_to_rte_err(rc);
451                 error->message = roc_error_msg_get(rc);
452         }
453
454         return rc;
455 }
456
457 static int
458 cnxk_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
459                         struct rte_tm_error *error)
460 {
461         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
462         int rc;
463
464         rc = roc_nix_tm_node_suspend_resume(&dev->nix, node_id, false);
465         if (rc) {
466                 error->type = roc_nix_tm_err_to_rte_err(rc);
467                 error->message = roc_error_msg_get(rc);
468         }
469
470         return rc;
471 }
472
473 static int
474 cnxk_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
475                              int clear_on_fail __rte_unused,
476                              struct rte_tm_error *error)
477 {
478         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
479         struct roc_nix *nix = &dev->nix;
480         int rc;
481
482         if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
483                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
484                 error->message = "hierarchy exists";
485                 return -EIO;
486         }
487
488         if (roc_nix_tm_leaf_cnt(nix) < dev->nb_txq) {
489                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
490                 error->message = "incomplete hierarchy";
491                 return -EINVAL;
492         }
493
494         rc = roc_nix_tm_hierarchy_disable(nix);
495         if (rc) {
496                 error->type = roc_nix_tm_err_to_rte_err(rc);
497                 error->message = roc_error_msg_get(rc);
498                 return -EIO;
499         }
500
501         rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_USER, true);
502         if (rc) {
503                 error->type = roc_nix_tm_err_to_rte_err(rc);
504                 error->message = roc_error_msg_get(rc);
505                 return -EIO;
506         }
507         error->type = RTE_TM_ERROR_TYPE_NONE;
508
509         return 0;
510 }
511
512 static int
513 cnxk_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev, uint32_t node_id,
514                                uint32_t profile_id, struct rte_tm_error *error)
515 {
516         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
517         struct roc_nix_tm_shaper_profile *profile;
518         struct roc_nix *nix = &dev->nix;
519         struct roc_nix_tm_node *node;
520         int rc;
521
522         rc = roc_nix_tm_node_shaper_update(nix, node_id, profile_id, false);
523         if (rc) {
524                 error->type = roc_nix_tm_err_to_rte_err(rc);
525                 error->message = roc_error_msg_get(rc);
526                 return -EINVAL;
527         }
528         node = roc_nix_tm_node_get(nix, node_id);
529         if (!node)
530                 return -EINVAL;
531
532         profile = roc_nix_tm_shaper_profile_get(nix, profile_id);
533         roc_nix_tm_shaper_default_red_algo(node, profile);
534
535         return 0;
536 }
537
538 static int
539 cnxk_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev, uint32_t node_id,
540                                uint32_t new_parent_id, uint32_t priority,
541                                uint32_t weight, struct rte_tm_error *error)
542 {
543         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
544         struct roc_nix *nix = &dev->nix;
545         int rc;
546
547         rc = roc_nix_tm_node_parent_update(nix, node_id, new_parent_id,
548                                            priority, weight);
549         if (rc) {
550                 error->type = roc_nix_tm_err_to_rte_err(rc);
551                 error->message = roc_error_msg_get(rc);
552                 return -EINVAL;
553         }
554
555         return 0;
556 }
557
/* rte_tm node_stats_read() callback: report per-node statistics.
 * Leaf nodes expose SQ packet/byte counters; non-leaf nodes expose
 * RED-dropped packet/byte counters.
 */
static int
cnxk_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
			    struct rte_tm_node_stats *stats,
			    uint64_t *stats_mask, int clear,
			    struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix_tm_node_stats nix_tm_stats;
	struct roc_nix *nix = &dev->nix;
	struct roc_nix_tm_node *node;
	int rc;

	node = roc_nix_tm_node_get(nix, node_id);
	if (!node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (roc_nix_tm_lvl_is_leaf(nix, node->lvl)) {
		struct roc_nix_stats_queue qstats;

		/* Leaf maps to a Tx queue; read its SQ counters
		 * (third arg 0 selects the Tx direction).
		 */
		rc = roc_nix_stats_queue_get(nix, node->id, 0, &qstats);
		if (!rc) {
			stats->n_pkts = qstats.tx_pkts;
			stats->n_bytes = qstats.tx_octs;
			*stats_mask =
				RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
		}
		goto exit;
	}

	/* Non-leaf: only RED drop counters are available; they are
	 * reported through the stats->leaf member of rte_tm_node_stats.
	 */
	rc = roc_nix_tm_node_stats_get(nix, node_id, clear, &nix_tm_stats);
	if (!rc) {
		stats->leaf.n_pkts_dropped[RTE_COLOR_RED] =
			nix_tm_stats.stats[ROC_NIX_TM_NODE_PKTS_DROPPED];
		stats->leaf.n_bytes_dropped[RTE_COLOR_RED] =
			nix_tm_stats.stats[ROC_NIX_TM_NODE_BYTES_DROPPED];
		*stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
			      RTE_TM_STATS_N_BYTES_RED_DROPPED;
	}

exit:
	/* Common error translation for both leaf and non-leaf paths */
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
	}
	return rc;
}
607
608 int
609 cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
610                           int mark_yellow, int mark_red,
611                           struct rte_tm_error *error)
612 {
613         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
614         struct roc_nix *roc_nix = &dev->nix;
615         int rc;
616
617         if (mark_green) {
618                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
619                 error->message = "Green VLAN marking not supported";
620                 return -EINVAL;
621         }
622
623         if (eth_dev->data->dev_started) {
624                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
625                 error->message = "VLAN DEI mark for running ports not "
626                                  "supported";
627                 return -EBUSY;
628         }
629
630         rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_VLAN_DEI,
631                                     mark_yellow, mark_red);
632         if (rc) {
633                 error->type = roc_nix_tm_err_to_rte_err(rc);
634                 error->message = roc_error_msg_get(rc);
635         }
636         return rc;
637 }
638
639 int
640 cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
641                         int mark_yellow, int mark_red,
642                         struct rte_tm_error *error)
643 {
644         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
645         struct roc_nix *roc_nix = &dev->nix;
646         int rc;
647
648         if (mark_green) {
649                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
650                 error->message = "Green IP ECN marking not supported";
651                 return -EINVAL;
652         }
653
654         if (eth_dev->data->dev_started) {
655                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
656                 error->message = "IP ECN mark for running ports not "
657                                  "supported";
658                 return -EBUSY;
659         }
660
661         rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV4_ECN,
662                                     mark_yellow, mark_red);
663         if (rc < 0)
664                 goto exit;
665
666         rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV6_ECN,
667                                     mark_yellow, mark_red);
668 exit:
669         if (rc < 0) {
670                 error->type = roc_nix_tm_err_to_rte_err(rc);
671                 error->message = roc_error_msg_get(rc);
672         }
673         return rc;
674 }
675
676 int
677 cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
678                          int mark_yellow, int mark_red,
679                          struct rte_tm_error *error)
680 {
681         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
682         struct roc_nix *roc_nix = &dev->nix;
683         int rc;
684
685         if (mark_green) {
686                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
687                 error->message = "Green IP DSCP marking not supported";
688                 return -EINVAL;
689         }
690
691         if (eth_dev->data->dev_started) {
692                 error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
693                 error->message = "IP DSCP mark for running ports not "
694                                  "supported";
695                 return -EBUSY;
696         }
697
698         rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV4_DSCP,
699                                     mark_yellow, mark_red);
700         if (rc < 0)
701                 goto exit;
702
703         rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV6_DSCP,
704                                     mark_yellow, mark_red);
705 exit:
706         if (rc < 0) {
707                 error->type = roc_nix_tm_err_to_rte_err(rc);
708                 error->message = roc_error_msg_get(rc);
709         }
710         return rc;
711 }
712
/* rte_tm ops table exposed via cnxk_nix_tm_ops_get() */
struct rte_tm_ops cnxk_tm_ops = {
	/* Topology queries */
	.node_type_get = cnxk_nix_tm_node_type_get,
	.capabilities_get = cnxk_nix_tm_capa_get,
	.level_capabilities_get = cnxk_nix_tm_level_capa_get,
	.node_capabilities_get = cnxk_nix_tm_node_capa_get,

	/* Shaper profile management */
	.shaper_profile_add = cnxk_nix_tm_shaper_profile_add,
	.shaper_profile_delete = cnxk_nix_tm_shaper_profile_delete,

	/* Hierarchy construction and control */
	.node_add = cnxk_nix_tm_node_add,
	.node_delete = cnxk_nix_tm_node_delete,
	.node_suspend = cnxk_nix_tm_node_suspend,
	.node_resume = cnxk_nix_tm_node_resume,
	.hierarchy_commit = cnxk_nix_tm_hierarchy_commit,

	/* Runtime updates and statistics */
	.node_shaper_update = cnxk_nix_tm_node_shaper_update,
	.node_parent_update = cnxk_nix_tm_node_parent_update,
	.node_stats_read = cnxk_nix_tm_node_stats_read,

	/* Packet marking (yellow/red only; green unsupported) */
	.mark_vlan_dei = cnxk_nix_tm_mark_vlan_dei,
	.mark_ip_ecn = cnxk_nix_tm_mark_ip_ecn,
	.mark_ip_dscp = cnxk_nix_tm_mark_ip_dscp,
};
736
737 int
738 cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev __rte_unused, void *arg)
739 {
740         if (!arg)
741                 return -EINVAL;
742
743         /* Check for supported revisions */
744         if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
745                 return -EINVAL;
746
747         *(const void **)arg = &cnxk_tm_ops;
748
749         return 0;
750 }
751
/* ethdev queue rate-limit hook: cap Tx queue @queue_idx to
 * @tx_rate_mbps. Switches the hardware to the rate-limit tree topology
 * on first use when multiple Tx queues exist.
 */
int
cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
				 uint16_t queue_idx, uint16_t tx_rate_mbps)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	/* Convert Mbps to the roc layer's rate unit (x 1e6) */
	uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
	struct roc_nix *nix = &dev->nix;
	int rc = -EINVAL;

	/* Check for supported revisions */
	if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
		goto exit;

	if (queue_idx >= eth_dev->data->nb_tx_queues)
		goto exit;

	/* With more than one Tx queue, a dedicated rate-limit tree is
	 * required; build and enable it if not already active.
	 */
	if ((roc_nix_tm_tree_type_get(nix) != ROC_NIX_TM_RLIMIT) &&
	    eth_dev->data->nb_tx_queues > 1) {
		/*
		 * Disable xmit will be enabled when
		 * new topology is available.
		 */
		rc = roc_nix_tm_hierarchy_disable(nix);
		if (rc)
			goto exit;

		rc = roc_nix_tm_prepare_rate_limited_tree(nix);
		if (rc)
			goto exit;

		rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_RLIMIT, true);
		if (rc)
			goto exit;
	}

	return roc_nix_tm_rlimit_sq(nix, queue_idx, tx_rate);
exit:
	return rc;
}