/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include <cnxk_ethdev.h>
#include <cnxk_utils.h>
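
/* Report whether a TM node is a leaf (Tx queue) or a non-leaf scheduler node */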
static int
cnxk_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
			  int *is_leaf, struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	struct roc_nix_tm_node *node;

	if (is_leaf == NULL) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		return -EINVAL;
	}

	node = roc_nix_tm_node_get(nix, node_id);
	if (node_id == RTE_TM_NODE_ID_NULL || !node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		return -EINVAL;
	}

	if (roc_nix_tm_lvl_is_leaf(nix, node->lvl))
		*is_leaf = 1;
	else
		*is_leaf = 0;

	return 0;
}
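
/* Global TM capabilities, derived from the hardware scheduler resource counts */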
static int
cnxk_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
		     struct rte_tm_capabilities *cap,
		     struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc, max_nr_nodes = 0, i, n_lvl;
	struct roc_nix *nix = &dev->nix;
	uint16_t schq[ROC_TM_LVL_MAX];

	memset(cap, 0, sizeof(*cap));

	rc = roc_nix_tm_rsrc_count(nix, schq);
	if (rc) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "unexpected fatal error";
		return rc;
	}

	for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
		max_nr_nodes += schq[i];

	cap->n_nodes_max = max_nr_nodes + dev->nb_txq;

	n_lvl = roc_nix_tm_lvl_cnt_get(nix);
	/* Consider leaf level */
	cap->n_levels_max = n_lvl + 1;
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	/* Shaper Capabilities */
	cap->shaper_private_n_max = max_nr_nodes;
	cap->shaper_n_max = max_nr_nodes;
	cap->shaper_private_dual_rate_n_max = max_nr_nodes;
	cap->shaper_private_rate_min = NIX_TM_MIN_SHAPER_RATE / 8;
	cap->shaper_private_rate_max = NIX_TM_MAX_SHAPER_RATE / 8;
	cap->shaper_private_packet_mode_supported = 1;
	cap->shaper_private_byte_mode_supported = 1;
	cap->shaper_pkt_length_adjust_min = NIX_TM_LENGTH_ADJUST_MIN;
	cap->shaper_pkt_length_adjust_max = NIX_TM_LENGTH_ADJUST_MAX;

	/* Schedule Capabilities */
	cap->sched_n_children_max = schq[n_lvl - 1];
	cap->sched_sp_n_priorities_max = NIX_TM_TLX_SP_PRIO_MAX;
	cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
	cap->sched_wfq_n_groups_max = 1;
	cap->sched_wfq_weight_max = roc_nix_tm_max_sched_wt_get();
	cap->sched_wfq_packet_mode_supported = 1;
	cap->sched_wfq_byte_mode_supported = 1;

	cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
				   RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
	cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES |
			  RTE_TM_STATS_N_PKTS_RED_DROPPED |
			  RTE_TM_STATS_N_BYTES_RED_DROPPED;

	cap->mark_vlan_dei_supported[RTE_COLOR_GREEN] = false;
	cap->mark_ip_ecn_tcp_supported[RTE_COLOR_GREEN] = false;
	cap->mark_ip_ecn_sctp_supported[RTE_COLOR_GREEN] = false;
	cap->mark_ip_dscp_supported[RTE_COLOR_GREEN] = false;

	for (i = RTE_COLOR_YELLOW; i < RTE_COLORS; i++) {
		cap->mark_vlan_dei_supported[i] = true;
		cap->mark_ip_ecn_tcp_supported[i] = true;
		cap->mark_ip_ecn_sctp_supported[i] = true;
		cap->mark_ip_dscp_supported[i] = true;
	}

	return 0;
}
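
/* Per-level TM capabilities: leaf level, root (TL1/TL2) or an intermediate level */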
static int
cnxk_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
			   struct rte_tm_level_capabilities *cap,
			   struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	uint16_t schq[ROC_TM_LVL_MAX];
	int rc, n_lvl;

	memset(cap, 0, sizeof(*cap));

	rc = roc_nix_tm_rsrc_count(nix, schq);
	if (rc) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "unexpected fatal error";
		return rc;
	}

	n_lvl = roc_nix_tm_lvl_cnt_get(nix);

	if (roc_nix_tm_lvl_is_leaf(nix, lvl)) {
		/* Leaf */
		cap->n_nodes_max = dev->nb_txq;
		cap->n_nodes_leaf_max = dev->nb_txq;
		cap->leaf_nodes_identical = 1;
		cap->leaf.stats_mask =
			RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;

	} else if (lvl == ROC_TM_LVL_ROOT) {
		/* Root node, a.k.a. TL2(vf)/TL1(pf) */
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->non_leaf_nodes_identical = 1;

		cap->nonleaf.shaper_private_supported = true;
		cap->nonleaf.shaper_private_dual_rate_supported =
			roc_nix_tm_lvl_have_link_access(nix, lvl) ? false :
								    true;
		cap->nonleaf.shaper_private_rate_min =
			NIX_TM_MIN_SHAPER_RATE / 8;
		cap->nonleaf.shaper_private_rate_max =
			NIX_TM_MAX_SHAPER_RATE / 8;
		cap->nonleaf.shaper_private_packet_mode_supported = 1;
		cap->nonleaf.shaper_private_byte_mode_supported = 1;

		cap->nonleaf.sched_n_children_max = schq[lvl];
		cap->nonleaf.sched_sp_n_priorities_max =
			roc_nix_tm_max_prio(nix, lvl) + 1;
		cap->nonleaf.sched_wfq_n_groups_max = 1;
		cap->nonleaf.sched_wfq_weight_max =
			roc_nix_tm_max_sched_wt_get();
		cap->nonleaf.sched_wfq_packet_mode_supported = 1;
		cap->nonleaf.sched_wfq_byte_mode_supported = 1;

		if (roc_nix_tm_lvl_have_link_access(nix, lvl))
			cap->nonleaf.stats_mask =
				RTE_TM_STATS_N_PKTS_RED_DROPPED |
				RTE_TM_STATS_N_BYTES_RED_DROPPED;
	} else if (lvl < ROC_TM_LVL_MAX) {
		/* TL2, TL3, TL4, MDQ */
		cap->n_nodes_max = schq[lvl];
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		cap->non_leaf_nodes_identical = 1;

		cap->nonleaf.shaper_private_supported = true;
		cap->nonleaf.shaper_private_dual_rate_supported = true;
		cap->nonleaf.shaper_private_rate_min =
			NIX_TM_MIN_SHAPER_RATE / 8;
		cap->nonleaf.shaper_private_rate_max =
			NIX_TM_MAX_SHAPER_RATE / 8;
		cap->nonleaf.shaper_private_packet_mode_supported = 1;
		cap->nonleaf.shaper_private_byte_mode_supported = 1;

		/* MDQ doesn't support Strict Priority */
		if ((int)lvl == (n_lvl - 1))
			cap->nonleaf.sched_n_children_max = dev->nb_txq;
		else
			cap->nonleaf.sched_n_children_max = schq[lvl - 1];
		cap->nonleaf.sched_sp_n_priorities_max =
			roc_nix_tm_max_prio(nix, lvl) + 1;
		cap->nonleaf.sched_wfq_n_groups_max = 1;
		cap->nonleaf.sched_wfq_weight_max =
			roc_nix_tm_max_sched_wt_get();
		cap->nonleaf.sched_wfq_packet_mode_supported = 1;
		cap->nonleaf.sched_wfq_byte_mode_supported = 1;
	} else {
		/* unsupported level */
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		return -EINVAL;
	}

	return 0;
}
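
/* Per-node TM capabilities, based on the level the node sits at */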
static int
cnxk_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
			  struct rte_tm_node_capabilities *cap,
			  struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_nix_tm_node *tm_node;
	struct roc_nix *nix = &dev->nix;
	uint16_t schq[ROC_TM_LVL_MAX];
	int rc, n_lvl, lvl;

	memset(cap, 0, sizeof(*cap));

	tm_node = (struct cnxk_nix_tm_node *)roc_nix_tm_node_get(nix, node_id);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	lvl = tm_node->nix_node.lvl;
	n_lvl = roc_nix_tm_lvl_cnt_get(nix);

	/* Leaf node */
	if (roc_nix_tm_lvl_is_leaf(nix, lvl)) {
		cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
		return 0;
	}

	rc = roc_nix_tm_rsrc_count(nix, schq);
	if (rc) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "unexpected fatal error";
		return rc;
	}

	/* Non Leaf Shaper */
	cap->shaper_private_supported = true;
	cap->shaper_private_rate_min = NIX_TM_MIN_SHAPER_RATE / 8;
	cap->shaper_private_rate_max = NIX_TM_MAX_SHAPER_RATE / 8;
	cap->shaper_private_packet_mode_supported = 1;
	cap->shaper_private_byte_mode_supported = 1;

	/* Non Leaf Scheduler */
	if (lvl == (n_lvl - 1))
		cap->nonleaf.sched_n_children_max = dev->nb_txq;
	else
		cap->nonleaf.sched_n_children_max = schq[lvl - 1];

	cap->nonleaf.sched_sp_n_priorities_max =
		roc_nix_tm_max_prio(nix, lvl) + 1;
	cap->nonleaf.sched_wfq_n_children_per_group_max =
		cap->nonleaf.sched_n_children_max;
	cap->nonleaf.sched_wfq_n_groups_max = 1;
	cap->nonleaf.sched_wfq_weight_max = roc_nix_tm_max_sched_wt_get();
	cap->nonleaf.sched_wfq_packet_mode_supported = 1;
	cap->nonleaf.sched_wfq_byte_mode_supported = 1;

	cap->shaper_private_dual_rate_supported = true;
	if (roc_nix_tm_lvl_have_link_access(nix, lvl)) {
		cap->shaper_private_dual_rate_supported = false;
		cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
				  RTE_TM_STATS_N_BYTES_RED_DROPPED;
	}

	return 0;
}
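
/* Create a shaper profile; in byte mode, rates and burst sizes are converted
 * to bits before being handed to the ROC layer.
 */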
static int
cnxk_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev, uint32_t id,
			       struct rte_tm_shaper_params *params,
			       struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_nix_tm_shaper_profile *profile;
	struct roc_nix *nix = &dev->nix;
	int rc;

	if (roc_nix_tm_shaper_profile_get(nix, id)) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "shaper profile ID exists";
		return -EINVAL;
	}

	profile = rte_zmalloc("cnxk_nix_tm_shaper_profile",
			      sizeof(struct cnxk_nix_tm_shaper_profile), 0);
	if (!profile)
		return -ENOMEM;
	profile->profile.id = id;
	profile->profile.commit_rate = params->committed.rate;
	profile->profile.peak_rate = params->peak.rate;
	profile->profile.commit_sz = params->committed.size;
	profile->profile.peak_sz = params->peak.size;
	/* If byte mode, then convert to bps */
	if (!params->packet_mode) {
		profile->profile.commit_rate *= 8;
		profile->profile.peak_rate *= 8;
		profile->profile.commit_sz *= 8;
		profile->profile.peak_sz *= 8;
	}
	profile->profile.pkt_len_adj = params->pkt_length_adjust;
	profile->profile.pkt_mode = params->packet_mode;
	profile->profile.free_fn = rte_free;
	rte_memcpy(&profile->params, params,
		   sizeof(struct rte_tm_shaper_params));

	rc = roc_nix_tm_shaper_profile_add(nix, &profile->profile);

	/* fill error information based on return value */
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
	}

	return rc;
}
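
/* Delete a shaper profile by ID */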
static int
cnxk_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
				  uint32_t profile_id,
				  struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	rc = roc_nix_tm_shaper_profile_delete(nix, profile_id);
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
	}

	return rc;
}
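
/* Stage a new node in the TM hierarchy; the level may be inferred from the
 * parent when RTE_TM_NODE_LEVEL_ID_ANY is given.
 */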
static int
cnxk_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
		     uint32_t parent_node_id, uint32_t priority,
		     uint32_t weight, uint32_t lvl,
		     struct rte_tm_node_params *params,
		     struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix_tm_shaper_profile *profile;
	struct roc_nix_tm_node *parent_node;
	struct roc_nix *nix = &dev->nix;
	struct cnxk_nix_tm_node *node;
	int rc;

	/* we don't support dynamic updates */
	if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
		error->message = "dynamic update not supported";
		return -EIO;
	}

	parent_node = roc_nix_tm_node_get(nix, parent_node_id);
	/* find the right level */
	if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
		if (parent_node_id == RTE_TM_NODE_ID_NULL) {
			lvl = ROC_TM_LVL_ROOT;
		} else if (parent_node) {
			lvl = parent_node->lvl + 1;
		} else {
			/* Neither proper parent nor proper level id given */
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "invalid parent node id";
			return -EINVAL;
		}
	}

	node = rte_zmalloc("cnxk_nix_tm_node", sizeof(struct cnxk_nix_tm_node),
			   0);
	if (!node)
		return -ENOMEM;

	rte_memcpy(&node->params, params, sizeof(struct rte_tm_node_params));

	node->nix_node.id = node_id;
	node->nix_node.parent_id = parent_node_id;
	node->nix_node.priority = priority;
	node->nix_node.weight = weight;
	node->nix_node.lvl = lvl;
	node->nix_node.shaper_profile_id = params->shaper_profile_id;

	profile = roc_nix_tm_shaper_profile_get(nix, params->shaper_profile_id);

	if (!roc_nix_tm_lvl_is_leaf(nix, lvl) &&
	    ((profile && profile->pkt_mode) ||
	     (params->nonleaf.wfq_weight_mode &&
	      params->nonleaf.n_sp_priorities &&
	      !params->nonleaf.wfq_weight_mode[0])))
		node->nix_node.pkt_mode = 1;

	rc = roc_nix_tm_node_add(nix, &node->nix_node);
	if (rc < 0) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
		return rc;
	}
	error->type = RTE_TM_ERROR_TYPE_NONE;
	roc_nix_tm_shaper_default_red_algo(&node->nix_node, profile);

	return 0;
}
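
/* Remove a node from the staged hierarchy */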
static int
cnxk_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id,
			struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	struct cnxk_nix_tm_node *node;
	int rc;

	/* we don't support dynamic updates yet */
	if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
		error->message = "hierarchy exists";
		return -EIO;
	}

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	node = (struct cnxk_nix_tm_node *)roc_nix_tm_node_get(nix, node_id);

	rc = roc_nix_tm_node_delete(nix, node_id, 0);
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
	} else {
		rte_free(node);
	}

	return rc;
}
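
/* Suspend scheduling on a node */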
static int
cnxk_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id,
			 struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc;

	rc = roc_nix_tm_node_suspend_resume(&dev->nix, node_id, true);
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
	}

	return rc;
}
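
/* Resume scheduling on a suspended node */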
static int
cnxk_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
			struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc;

	rc = roc_nix_tm_node_suspend_resume(&dev->nix, node_id, false);
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
	}

	return rc;
}
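
/* Validate the staged hierarchy and program it into hardware */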
static int
cnxk_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
			     int clear_on_fail __rte_unused,
			     struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "hierarchy exists";
		return -EIO;
	}

	if (roc_nix_tm_leaf_cnt(nix) < dev->nb_txq) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "incomplete hierarchy";
		return -EINVAL;
	}

	rc = roc_nix_tm_hierarchy_disable(nix);
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
		return rc;
	}

	rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_USER, true);
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
		return rc;
	}
	error->type = RTE_TM_ERROR_TYPE_NONE;

	return 0;
}
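
/* Attach a different shaper profile to an existing node */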
static int
cnxk_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev, uint32_t node_id,
			       uint32_t profile_id, struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix_tm_shaper_profile *profile;
	struct roc_nix *nix = &dev->nix;
	struct roc_nix_tm_node *node;
	int rc;

	rc = roc_nix_tm_node_shaper_update(nix, node_id, profile_id, false);
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
		return rc;
	}
	node = roc_nix_tm_node_get(nix, node_id);
	if (!node)
		return -EINVAL;

	profile = roc_nix_tm_shaper_profile_get(nix, profile_id);
	roc_nix_tm_shaper_default_red_algo(node, profile);

	return 0;
}
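
/* Move a node under a new parent, updating its priority and weight */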
static int
cnxk_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev, uint32_t node_id,
			       uint32_t new_parent_id, uint32_t priority,
			       uint32_t weight, struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	rc = roc_nix_tm_node_parent_update(nix, node_id, new_parent_id,
					   priority, weight);
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
	}

	return rc;
}
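
/* Read per-node statistics: Tx queue counters for leaf nodes, RED drop
 * counters for non-leaf nodes.
 */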
static int
cnxk_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
			    struct rte_tm_node_stats *stats,
			    uint64_t *stats_mask, int clear,
			    struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix_tm_node_stats nix_tm_stats;
	struct roc_nix *nix = &dev->nix;
	struct roc_nix_tm_node *node;
	int rc;

	node = roc_nix_tm_node_get(nix, node_id);
	if (!node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (roc_nix_tm_lvl_is_leaf(nix, node->lvl)) {
		struct roc_nix_stats_queue qstats;

		rc = roc_nix_stats_queue_get(nix, node->id, 0, &qstats);
		if (!rc) {
			stats->n_pkts = qstats.tx_pkts;
			stats->n_bytes = qstats.tx_octs;
			*stats_mask =
				RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
		}
		goto exit;
	}

	rc = roc_nix_tm_node_stats_get(nix, node_id, clear, &nix_tm_stats);
	if (!rc) {
		stats->leaf.n_pkts_dropped[RTE_COLOR_RED] =
			nix_tm_stats.stats[ROC_NIX_TM_NODE_PKTS_DROPPED];
		stats->leaf.n_bytes_dropped[RTE_COLOR_RED] =
			nix_tm_stats.stats[ROC_NIX_TM_NODE_BYTES_DROPPED];
		*stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
			      RTE_TM_STATS_N_BYTES_RED_DROPPED;
	}

exit:
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
	}
	return rc;
}
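
/* Configure VLAN DEI marking for yellow and red packets */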
static int
cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
			  int mark_yellow, int mark_red,
			  struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *roc_nix = &dev->nix;
	int rc;

	if (mark_green) {
		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
		error->message = "Green VLAN marking not supported";
		return -EINVAL;
	}

	if (eth_dev->data->dev_started) {
		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
		error->message = "VLAN DEI mark for running ports not "
				 "supported";
		return -EBUSY;
	}

	rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_VLAN_DEI,
				    mark_yellow, mark_red);
	if (rc < 0) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
	}
	return rc;
}
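
/* Configure IPv4/IPv6 ECN marking for yellow and red packets */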
static int
cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
			int mark_yellow, int mark_red,
			struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *roc_nix = &dev->nix;
	int rc;

	if (mark_green) {
		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
		error->message = "Green IP ECN marking not supported";
		return -EINVAL;
	}

	if (eth_dev->data->dev_started) {
		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
		error->message = "IP ECN mark for running ports not "
				 "supported";
		return -EBUSY;
	}

	rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV4_ECN,
				    mark_yellow, mark_red);
	if (rc < 0)
		goto exit;

	rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV6_ECN,
				    mark_yellow, mark_red);
exit:
	if (rc < 0) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
	}
	return rc;
}
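
/* Configure IPv4/IPv6 DSCP marking for yellow and red packets */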
static int
cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
			 int mark_yellow, int mark_red,
			 struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *roc_nix = &dev->nix;
	int rc;

	if (mark_green) {
		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
		error->message = "Green IP DSCP marking not supported";
		return -EINVAL;
	}

	if (eth_dev->data->dev_started) {
		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
		error->message = "IP DSCP mark for running ports not "
				 "supported";
		return -EBUSY;
	}

	rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV4_DSCP,
				    mark_yellow, mark_red);
	if (rc < 0)
		goto exit;

	rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV6_DSCP,
				    mark_yellow, mark_red);
exit:
	if (rc < 0) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
	}
	return rc;
}
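
/* TM callbacks exposed to the rte_tm API */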
struct rte_tm_ops cnxk_tm_ops = {
	.node_type_get = cnxk_nix_tm_node_type_get,
	.capabilities_get = cnxk_nix_tm_capa_get,
	.level_capabilities_get = cnxk_nix_tm_level_capa_get,
	.node_capabilities_get = cnxk_nix_tm_node_capa_get,

	.shaper_profile_add = cnxk_nix_tm_shaper_profile_add,
	.shaper_profile_delete = cnxk_nix_tm_shaper_profile_delete,

	.node_add = cnxk_nix_tm_node_add,
	.node_delete = cnxk_nix_tm_node_delete,
	.node_suspend = cnxk_nix_tm_node_suspend,
	.node_resume = cnxk_nix_tm_node_resume,
	.hierarchy_commit = cnxk_nix_tm_hierarchy_commit,

	.node_shaper_update = cnxk_nix_tm_node_shaper_update,
	.node_parent_update = cnxk_nix_tm_node_parent_update,
	.node_stats_read = cnxk_nix_tm_node_stats_read,

	.mark_vlan_dei = cnxk_nix_tm_mark_vlan_dei,
	.mark_ip_ecn = cnxk_nix_tm_mark_ip_ecn,
	.mark_ip_dscp = cnxk_nix_tm_mark_ip_dscp,
};
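
/* Hand out the TM ops table; TM is not supported on early CN96/CN95 silicon */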
int
cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev __rte_unused, void *arg)
{
	if (!arg)
		return -EINVAL;

	/* Check for supported revisions */
	if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
		return -EINVAL;

	*(const void **)arg = &cnxk_tm_ops;

	return 0;
}
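
/* Apply a per-Tx-queue rate limit in Mbps; switches the hierarchy to the
 * rate-limit topology when first used with multiple Tx queues.
 */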
int
cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
				 uint16_t queue_idx, uint16_t tx_rate_mbps)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
	struct roc_nix *nix = &dev->nix;
	int rc = -EINVAL;

	/* Check for supported revisions */
	if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
		goto exit;

	if (queue_idx >= eth_dev->data->nb_tx_queues)
		goto exit;

	if ((roc_nix_tm_tree_type_get(nix) != ROC_NIX_TM_RLIMIT) &&
	    eth_dev->data->nb_tx_queues > 1) {
		/*
		 * Disable xmit; it is re-enabled once the new
		 * rate-limit topology is in place.
		 */
		rc = roc_nix_tm_hierarchy_disable(nix);
		if (rc)
			goto exit;

		rc = roc_nix_tm_prepare_rate_limited_tree(nix);
		if (rc)
			goto exit;

		rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_RLIMIT, true);
		if (rc)
			goto exit;
	}

	return roc_nix_tm_rlimit_sq(nix, queue_idx, tx_rate);
exit:
	return rc;
}