/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include <cnxk_ethdev.h>

#include <cnxk_utils.h>

static int
cnxk_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
			  int *is_leaf, struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	struct roc_nix_tm_node *node;

	if (is_leaf == NULL) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		return -EINVAL;
	}

	node = roc_nix_tm_node_get(nix, node_id);
	if (node_id == RTE_TM_NODE_ID_NULL || !node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		return -EINVAL;
	}

	if (roc_nix_tm_lvl_is_leaf(nix, node->lvl))
		*is_leaf = true;
	else
		*is_leaf = false;

	return 0;
}
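/*
 * Usage sketch (illustrative only, not compiled into the driver):
 * applications reach this callback through the generic rte_tm API.
 * "port_id" is assumed to be an initialized cnxk ethdev port:
 *
 *	struct rte_tm_error err;
 *	int is_leaf;
 *
 *	if (rte_tm_node_type_get(port_id, node_id, &is_leaf, &err) == 0)
 *		printf("node %u is %s\n", node_id,
 *		       is_leaf ? "leaf (SQ)" : "non-leaf (scheduler)");
 */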

static int
cnxk_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
		     struct rte_tm_capabilities *cap,
		     struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc, max_nr_nodes = 0, i, n_lvl;
	struct roc_nix *nix = &dev->nix;
	uint16_t schq[ROC_TM_LVL_MAX];

	memset(cap, 0, sizeof(*cap));

	rc = roc_nix_tm_rsrc_count(nix, schq);
	if (rc) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "unexpected fatal error";
		return rc;
	}

	for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
		max_nr_nodes += schq[i];

	cap->n_nodes_max = max_nr_nodes + dev->nb_txq;

	n_lvl = roc_nix_tm_lvl_cnt_get(nix);
	/* Consider leaf level */
	cap->n_levels_max = n_lvl + 1;
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	/* Shaper Capabilities */
	cap->shaper_private_n_max = max_nr_nodes;
	cap->shaper_n_max = max_nr_nodes;
	cap->shaper_private_dual_rate_n_max = max_nr_nodes;
	cap->shaper_private_rate_min = NIX_TM_MIN_SHAPER_RATE / 8;
	cap->shaper_private_rate_max = NIX_TM_MAX_SHAPER_RATE / 8;
	cap->shaper_private_packet_mode_supported = 1;
	cap->shaper_private_byte_mode_supported = 1;
	cap->shaper_pkt_length_adjust_min = NIX_TM_LENGTH_ADJUST_MIN;
	cap->shaper_pkt_length_adjust_max = NIX_TM_LENGTH_ADJUST_MAX;

	/* Schedule Capabilities */
	cap->sched_n_children_max = schq[n_lvl - 1];
	cap->sched_sp_n_priorities_max = NIX_TM_TLX_SP_PRIO_MAX;
	cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
	cap->sched_wfq_n_groups_max = 1;
	cap->sched_wfq_weight_max = roc_nix_tm_max_sched_wt_get();
	cap->sched_wfq_packet_mode_supported = 1;
	cap->sched_wfq_byte_mode_supported = 1;

	cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
				   RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
	cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES |
			  RTE_TM_STATS_N_PKTS_RED_DROPPED |
			  RTE_TM_STATS_N_BYTES_RED_DROPPED;

	for (i = 0; i < RTE_COLORS; i++) {
		cap->mark_vlan_dei_supported[i] = false;
		cap->mark_ip_ecn_tcp_supported[i] = false;
		cap->mark_ip_dscp_supported[i] = false;
	}

	return 0;
}
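/*
 * Query sketch (illustrative): the limits filled in above surface through
 * rte_tm_capabilities_get(). Assuming an initialized cnxk "port_id":
 *
 *	struct rte_tm_capabilities cap;
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_capabilities_get(port_id, &cap, &err) == 0)
 *		printf("max nodes %u, levels %u\n", cap.n_nodes_max,
 *		       cap.n_levels_max);
 */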

static int
cnxk_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
			   struct rte_tm_level_capabilities *cap,
			   struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	uint16_t schq[ROC_TM_LVL_MAX];
	int rc, n_lvl;

	memset(cap, 0, sizeof(*cap));

	rc = roc_nix_tm_rsrc_count(nix, schq);
	if (rc) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "unexpected fatal error";
		return rc;
	}

	n_lvl = roc_nix_tm_lvl_cnt_get(nix);

	if (roc_nix_tm_lvl_is_leaf(nix, lvl)) {
		/* Leaf */
		cap->n_nodes_max = dev->nb_txq;
		cap->n_nodes_leaf_max = dev->nb_txq;
		cap->leaf_nodes_identical = 1;
		cap->leaf.stats_mask =
			RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;

	} else if (lvl == ROC_TM_LVL_ROOT) {
		/* Root node, a.k.a. TL2(vf)/TL1(pf) */
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->non_leaf_nodes_identical = 1;

		cap->nonleaf.shaper_private_supported = true;
		cap->nonleaf.shaper_private_dual_rate_supported =
			roc_nix_tm_lvl_have_link_access(nix, lvl) ? false :
								    true;
		cap->nonleaf.shaper_private_rate_min =
			NIX_TM_MIN_SHAPER_RATE / 8;
		cap->nonleaf.shaper_private_rate_max =
			NIX_TM_MAX_SHAPER_RATE / 8;
		cap->nonleaf.shaper_private_packet_mode_supported = 1;
		cap->nonleaf.shaper_private_byte_mode_supported = 1;

		cap->nonleaf.sched_n_children_max = schq[lvl];
		cap->nonleaf.sched_sp_n_priorities_max =
			roc_nix_tm_max_prio(nix, lvl) + 1;
		cap->nonleaf.sched_wfq_n_groups_max = 1;
		cap->nonleaf.sched_wfq_weight_max =
			roc_nix_tm_max_sched_wt_get();
		cap->nonleaf.sched_wfq_packet_mode_supported = 1;
		cap->nonleaf.sched_wfq_byte_mode_supported = 1;

		if (roc_nix_tm_lvl_have_link_access(nix, lvl))
			cap->nonleaf.stats_mask =
				RTE_TM_STATS_N_PKTS_RED_DROPPED |
				RTE_TM_STATS_N_BYTES_RED_DROPPED;
	} else if (lvl < ROC_TM_LVL_MAX) {
		/* TL2, TL3, TL4, MDQ */
		cap->n_nodes_max = schq[lvl];
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		cap->non_leaf_nodes_identical = 1;

		cap->nonleaf.shaper_private_supported = true;
		cap->nonleaf.shaper_private_dual_rate_supported = true;
		cap->nonleaf.shaper_private_rate_min =
			NIX_TM_MIN_SHAPER_RATE / 8;
		cap->nonleaf.shaper_private_rate_max =
			NIX_TM_MAX_SHAPER_RATE / 8;
		cap->nonleaf.shaper_private_packet_mode_supported = 1;
		cap->nonleaf.shaper_private_byte_mode_supported = 1;

		/* MDQ doesn't support Strict Priority */
		if ((int)lvl == (n_lvl - 1))
			cap->nonleaf.sched_n_children_max = dev->nb_txq;
		else
			cap->nonleaf.sched_n_children_max = schq[lvl - 1];
		cap->nonleaf.sched_sp_n_priorities_max =
			roc_nix_tm_max_prio(nix, lvl) + 1;
		cap->nonleaf.sched_wfq_n_groups_max = 1;
		cap->nonleaf.sched_wfq_weight_max =
			roc_nix_tm_max_sched_wt_get();
		cap->nonleaf.sched_wfq_packet_mode_supported = 1;
		cap->nonleaf.sched_wfq_byte_mode_supported = 1;
	} else {
		/* unsupported level */
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		return NIX_ERR_PARAM;
	}

	return 0;
}

static int
cnxk_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
			  struct rte_tm_node_capabilities *cap,
			  struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_nix_tm_node *tm_node;
	struct roc_nix *nix = &dev->nix;
	uint16_t schq[ROC_TM_LVL_MAX];
	int rc, n_lvl, lvl;

	memset(cap, 0, sizeof(*cap));

	tm_node = (struct cnxk_nix_tm_node *)roc_nix_tm_node_get(nix, node_id);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return NIX_ERR_PARAM;
	}

	lvl = tm_node->nix_node.lvl;
	n_lvl = roc_nix_tm_lvl_cnt_get(nix);

	/* Leaf node */
	if (roc_nix_tm_lvl_is_leaf(nix, lvl)) {
		cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
		return 0;
	}

	rc = roc_nix_tm_rsrc_count(nix, schq);
	if (rc) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "unexpected fatal error";
		return rc;
	}

	/* Non Leaf Shaper */
	cap->shaper_private_supported = true;
	cap->shaper_private_rate_min = NIX_TM_MIN_SHAPER_RATE / 8;
	cap->shaper_private_rate_max = NIX_TM_MAX_SHAPER_RATE / 8;
	cap->shaper_private_packet_mode_supported = 1;
	cap->shaper_private_byte_mode_supported = 1;

	/* Non Leaf Scheduler */
	if (lvl == (n_lvl - 1))
		cap->nonleaf.sched_n_children_max = dev->nb_txq;
	else
		cap->nonleaf.sched_n_children_max = schq[lvl - 1];

	cap->nonleaf.sched_sp_n_priorities_max =
		roc_nix_tm_max_prio(nix, lvl) + 1;
	cap->nonleaf.sched_wfq_n_children_per_group_max =
		cap->nonleaf.sched_n_children_max;
	cap->nonleaf.sched_wfq_n_groups_max = 1;
	cap->nonleaf.sched_wfq_weight_max = roc_nix_tm_max_sched_wt_get();
	cap->nonleaf.sched_wfq_packet_mode_supported = 1;
	cap->nonleaf.sched_wfq_byte_mode_supported = 1;

	cap->shaper_private_dual_rate_supported = true;
	if (roc_nix_tm_lvl_have_link_access(nix, lvl)) {
		cap->shaper_private_dual_rate_supported = false;
		cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
				  RTE_TM_STATS_N_BYTES_RED_DROPPED;
	}

	return 0;
}

static int
cnxk_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev, uint32_t id,
			       struct rte_tm_shaper_params *params,
			       struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_nix_tm_shaper_profile *profile;
	struct roc_nix *nix = &dev->nix;
	int rc;

	if (roc_nix_tm_shaper_profile_get(nix, id)) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "shaper profile ID exists";
		return -EINVAL;
	}

	profile = rte_zmalloc("cnxk_nix_tm_shaper_profile",
			      sizeof(struct cnxk_nix_tm_shaper_profile), 0);
	if (!profile)
		return -ENOMEM;

	profile->profile.id = id;
	profile->profile.commit_rate = params->committed.rate;
	profile->profile.peak_rate = params->peak.rate;
	profile->profile.commit_sz = params->committed.size;
	profile->profile.peak_sz = params->peak.size;
	/* If byte mode, convert rates and burst sizes to bits */
	if (!params->packet_mode) {
		profile->profile.commit_rate *= 8;
		profile->profile.peak_rate *= 8;
		profile->profile.commit_sz *= 8;
		profile->profile.peak_sz *= 8;
	}
	profile->profile.pkt_len_adj = params->pkt_length_adjust;
	profile->profile.pkt_mode = params->packet_mode;
	profile->profile.free_fn = rte_free;
	rte_memcpy(&profile->params, params,
		   sizeof(struct rte_tm_shaper_params));

	rc = roc_nix_tm_shaper_profile_add(nix, &profile->profile);

	/* fill error information based on return value */
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
	}

	return rc;
}
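/*
 * Profile sketch (illustrative): rte_tm rates are bytes per second when
 * packet_mode is 0; the conversion to bits per second for the ROC layer
 * happens above. A dual-rate profile of 100 Mbps committed / 200 Mbps
 * peak, registered as profile id 1 on an assumed "port_id":
 *
 *	struct rte_tm_shaper_params sp = {
 *		.committed = { .rate = 100000000 / 8, .size = 4096 },
 *		.peak = { .rate = 200000000 / 8, .size = 4096 },
 *		.pkt_length_adjust = 24,
 *		.packet_mode = 0,
 *	};
 *	struct rte_tm_error err;
 *
 *	rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
 */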

static int
cnxk_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
				  uint32_t profile_id,
				  struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	rc = roc_nix_tm_shaper_profile_delete(nix, profile_id);
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
	}

	return rc;
}

static int
cnxk_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
		     uint32_t parent_node_id, uint32_t priority,
		     uint32_t weight, uint32_t lvl,
		     struct rte_tm_node_params *params,
		     struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix_tm_shaper_profile *profile;
	struct roc_nix_tm_node *parent_node;
	struct roc_nix *nix = &dev->nix;
	struct cnxk_nix_tm_node *node;
	int rc;

	/* we don't support dynamic updates */
	if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
		error->message = "dynamic update not supported";
		return -EIO;
	}

	parent_node = roc_nix_tm_node_get(nix, parent_node_id);
	/* find the right level */
	if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
		if (parent_node_id == RTE_TM_NODE_ID_NULL) {
			lvl = ROC_TM_LVL_ROOT;
		} else if (parent_node) {
			lvl = parent_node->lvl + 1;
		} else {
			/* Neither proper parent nor proper level id given */
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "invalid parent node id";
			return -ERANGE;
		}
	}

	node = rte_zmalloc("cnxk_nix_tm_node", sizeof(struct cnxk_nix_tm_node),
			   0);
	if (!node)
		return -ENOMEM;

	rte_memcpy(&node->params, params, sizeof(struct rte_tm_node_params));

	node->nix_node.id = node_id;
	node->nix_node.parent_id = parent_node_id;
	node->nix_node.priority = priority;
	node->nix_node.weight = weight;
	node->nix_node.lvl = lvl;
	node->nix_node.shaper_profile_id = params->shaper_profile_id;

	profile = roc_nix_tm_shaper_profile_get(nix, params->shaper_profile_id);
	/* Select packet mode when the profile or WFQ weight mode asks for it */
	if (!roc_nix_tm_lvl_is_leaf(nix, lvl) &&
	    ((profile && profile->pkt_mode) ||
	     (params->nonleaf.wfq_weight_mode &&
	      params->nonleaf.n_sp_priorities &&
	      !params->nonleaf.wfq_weight_mode[0])))
		node->nix_node.pkt_mode = 1;

	rc = roc_nix_tm_node_add(nix, &node->nix_node);
	if (rc < 0) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
		return rc;
	}
	error->type = RTE_TM_ERROR_TYPE_NONE;
	roc_nix_tm_shaper_default_red_algo(&node->nix_node, profile);

	return 0;
}
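/*
 * Hierarchy sketch (illustrative, with assumed identifiers "port_id" and
 * "nb_txq"): rte_tm_node_add() lands here. A real cnxk tree needs a node
 * at every level reported by rte_tm_level_capabilities_get(); the calls
 * below only show the API shape for a root plus queue leaves, where leaf
 * node ids map 1:1 to Tx queue ids.
 *
 *	struct rte_tm_node_params np = {
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *	};
 *	struct rte_tm_error err;
 *	uint32_t q;
 *
 *	rte_tm_node_add(port_id, 1000, RTE_TM_NODE_ID_NULL, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *	for (q = 0; q < nb_txq; q++)
 *		rte_tm_node_add(port_id, q, 1000, 0, 1,
 *				RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 */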

static int
cnxk_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id,
			struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	struct cnxk_nix_tm_node *node;
	int rc;

	/* we don't support dynamic updates yet */
	if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
		error->message = "hierarchy exists";
		return -EIO;
	}

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	node = (struct cnxk_nix_tm_node *)roc_nix_tm_node_get(nix, node_id);

	rc = roc_nix_tm_node_delete(nix, node_id, 0);
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
	} else {
		rte_free(node);
	}

	return rc;
}

static int
cnxk_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id,
			 struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc;

	rc = roc_nix_tm_node_suspend_resume(&dev->nix, node_id, true);
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
	}

	return rc;
}

static int
cnxk_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
			struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc;

	rc = roc_nix_tm_node_suspend_resume(&dev->nix, node_id, false);
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
	}

	return rc;
}

static int
cnxk_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
			     int clear_on_fail __rte_unused,
			     struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "hierarchy exists";
		return -EIO;
	}

	if (roc_nix_tm_leaf_cnt(nix) < dev->nb_txq) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "incomplete hierarchy";
		return -EINVAL;
	}

	rc = roc_nix_tm_hierarchy_disable(nix);
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
		return -EIO;
	}

	rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_USER, true);
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
		return -EIO;
	}
	error->type = RTE_TM_ERROR_TYPE_NONE;

	return 0;
}
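/*
 * Commit sketch (illustrative): once every Tx queue has a leaf node, the
 * application freezes the tree and the driver programs the hardware
 * topology. Note clear_on_fail is accepted but unused here (__rte_unused):
 *
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_hierarchy_commit(port_id, 1, &err) != 0)
 *		printf("commit failed: %s\n", err.message);
 */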

static int
cnxk_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev, uint32_t node_id,
			       uint32_t profile_id, struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix_tm_shaper_profile *profile;
	struct roc_nix *nix = &dev->nix;
	struct roc_nix_tm_node *node;
	int rc;

	rc = roc_nix_tm_node_shaper_update(nix, node_id, profile_id, false);
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
		return -EINVAL;
	}

	node = roc_nix_tm_node_get(nix, node_id);
	if (!node)
		return -EINVAL;

	profile = roc_nix_tm_shaper_profile_get(nix, profile_id);
	roc_nix_tm_shaper_default_red_algo(node, profile);

	return 0;
}

static int
cnxk_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev, uint32_t node_id,
			       uint32_t new_parent_id, uint32_t priority,
			       uint32_t weight, struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	rc = roc_nix_tm_node_parent_update(nix, node_id, new_parent_id,
					   priority, weight);
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
		return rc;
	}

	return 0;
}

static int
cnxk_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
			    struct rte_tm_node_stats *stats,
			    uint64_t *stats_mask, int clear,
			    struct rte_tm_error *error)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix_tm_node_stats nix_tm_stats;
	struct roc_nix *nix = &dev->nix;
	struct roc_nix_tm_node *node;
	int rc;

	node = roc_nix_tm_node_get(nix, node_id);
	if (!node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (roc_nix_tm_lvl_is_leaf(nix, node->lvl)) {
		struct roc_nix_stats_queue qstats;

		rc = roc_nix_stats_queue_get(nix, node->id, 0, &qstats);
		if (!rc) {
			stats->n_pkts = qstats.tx_pkts;
			stats->n_bytes = qstats.tx_octs;
			*stats_mask =
				RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
		}
		goto exit;
	}

	rc = roc_nix_tm_node_stats_get(nix, node_id, clear, &nix_tm_stats);
	if (!rc) {
		stats->leaf.n_pkts_dropped[RTE_COLOR_RED] =
			nix_tm_stats.stats[ROC_NIX_TM_NODE_PKTS_DROPPED];
		stats->leaf.n_bytes_dropped[RTE_COLOR_RED] =
			nix_tm_stats.stats[ROC_NIX_TM_NODE_BYTES_DROPPED];
		*stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
			      RTE_TM_STATS_N_BYTES_RED_DROPPED;
	}

exit:
	if (rc) {
		error->type = roc_nix_tm_err_to_rte_err(rc);
		error->message = roc_error_msg_get(rc);
	}
	return rc;
}
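/*
 * Stats sketch (illustrative): leaf nodes report SQ Tx packet/byte
 * counters, link-level nodes report RED-dropped counters; check the
 * returned mask before using a field:
 *
 *	struct rte_tm_node_stats st;
 *	struct rte_tm_error err;
 *	uint64_t mask;
 *
 *	if (rte_tm_node_stats_read(port_id, node_id, &st, &mask, 0,
 *				   &err) == 0 && (mask & RTE_TM_STATS_N_PKTS))
 *		printf("pkts %" PRIu64 "\n", st.n_pkts);
 */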

const struct rte_tm_ops cnxk_tm_ops = {
	.node_type_get = cnxk_nix_tm_node_type_get,
	.capabilities_get = cnxk_nix_tm_capa_get,
	.level_capabilities_get = cnxk_nix_tm_level_capa_get,
	.node_capabilities_get = cnxk_nix_tm_node_capa_get,

	.shaper_profile_add = cnxk_nix_tm_shaper_profile_add,
	.shaper_profile_delete = cnxk_nix_tm_shaper_profile_delete,

	.node_add = cnxk_nix_tm_node_add,
	.node_delete = cnxk_nix_tm_node_delete,
	.node_suspend = cnxk_nix_tm_node_suspend,
	.node_resume = cnxk_nix_tm_node_resume,
	.hierarchy_commit = cnxk_nix_tm_hierarchy_commit,

	.node_shaper_update = cnxk_nix_tm_node_shaper_update,
	.node_parent_update = cnxk_nix_tm_node_parent_update,
	.node_stats_read = cnxk_nix_tm_node_stats_read,
};

int
cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev __rte_unused, void *arg)
{
	if (!arg)
		return -EINVAL;

	/* Check for supported revisions */
	if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
		return -EINVAL;

	*(const void **)arg = &cnxk_tm_ops;

	return 0;
}

int
cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
				 uint16_t queue_idx, uint16_t tx_rate_mbps)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
	struct roc_nix *nix = &dev->nix;
	int rc = -EINVAL;

	/* Check for supported revisions */
	if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
		goto exit;

	if (queue_idx >= eth_dev->data->nb_tx_queues)
		goto exit;

	if ((roc_nix_tm_tree_type_get(nix) != ROC_NIX_TM_RLIMIT) &&
	    eth_dev->data->nb_tx_queues > 1) {
		/*
		 * Disable transmit now; it is re-enabled once the new
		 * rate-limit topology is in place.
		 */
		rc = roc_nix_tm_hierarchy_disable(nix);
		if (rc)
			goto exit;

		rc = roc_nix_tm_prepare_rate_limited_tree(nix);
		if (rc)
			goto exit;

		rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_RLIMIT, true);
		if (rc)
			goto exit;
	}

	return roc_nix_tm_rlimit_sq(nix, queue_idx, tx_rate);
exit:
	return rc;
}
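/*
 * Rate-limit sketch (illustrative): this entry point backs the generic
 * ethdev API rather than rte_tm; the rate argument is in Mbps and is
 * converted to bps above. Capping Tx queue 0 of an assumed "port_id"
 * at 100 Mbps:
 *
 *	int rc = rte_eth_set_queue_rate_limit(port_id, 0, 100);
 */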