/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <rte_malloc.h>

#include "txgbe_ethdev.h"
static int txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
				     struct rte_tm_capabilities *cap,
				     struct rte_tm_error *error);
static int txgbe_shaper_profile_add(struct rte_eth_dev *dev,
				    uint32_t shaper_profile_id,
				    struct rte_tm_shaper_params *profile,
				    struct rte_tm_error *error);
static int txgbe_shaper_profile_del(struct rte_eth_dev *dev,
				    uint32_t shaper_profile_id,
				    struct rte_tm_error *error);
static int txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
			  uint32_t parent_node_id, uint32_t priority,
			  uint32_t weight, uint32_t level_id,
			  struct rte_tm_node_params *params,
			  struct rte_tm_error *error);
static int txgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
			     struct rte_tm_error *error);
static int txgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
			       int *is_leaf, struct rte_tm_error *error);
static int txgbe_level_capabilities_get(struct rte_eth_dev *dev,
					uint32_t level_id,
					struct rte_tm_level_capabilities *cap,
					struct rte_tm_error *error);
static int txgbe_node_capabilities_get(struct rte_eth_dev *dev,
				       uint32_t node_id,
				       struct rte_tm_node_capabilities *cap,
				       struct rte_tm_error *error);
static int txgbe_hierarchy_commit(struct rte_eth_dev *dev,
				  int clear_on_fail,
				  struct rte_tm_error *error);
const struct rte_tm_ops txgbe_tm_ops = {
	.capabilities_get = txgbe_tm_capabilities_get,
	.shaper_profile_add = txgbe_shaper_profile_add,
	.shaper_profile_delete = txgbe_shaper_profile_del,
	.node_add = txgbe_node_add,
	.node_delete = txgbe_node_delete,
	.node_type_get = txgbe_node_type_get,
	.level_capabilities_get = txgbe_level_capabilities_get,
	.node_capabilities_get = txgbe_node_capabilities_get,
	.hierarchy_commit = txgbe_hierarchy_commit,
};
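
/*
 * Rough sketch of how an application might drive these callbacks through
 * the generic rte_tm API. Port 0 and the profile/node ids below are
 * arbitrary example values; per the checks in this file, priority must be
 * 0, weight must be 1, and only queue (leaf) nodes may carry a peak rate.
 *
 *	struct rte_tm_error err;
 *	struct rte_tm_shaper_params sp = {
 *		.peak = { .rate = 125000000 },	// bytes/s, i.e. 1 Gbit/s
 *	};
 *	struct rte_tm_node_params np = {
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *	};
 *
 *	rte_tm_shaper_profile_add(0, 1, &sp, &err);
 *	rte_tm_node_add(0, 1000, RTE_TM_NODE_ID_NULL, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);	// port/root
 *	rte_tm_node_add(0, 2000, 1000, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);	// TC node
 *	np.shaper_profile_id = 1;
 *	rte_tm_node_add(0, 0, 2000, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);	// Tx queue 0
 *	rte_tm_hierarchy_commit(0, 1, &err);
 */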
int
txgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
		 void *arg)
{
	if (!arg)
		return -EINVAL;

	*(const void **)arg = &txgbe_tm_ops;

	return 0;
}
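
/*
 * The software model kept in struct txgbe_tm_conf is a three-level
 * hierarchy: a single port (root) node, TC nodes attached to the port and
 * queue (leaf) nodes attached to the TCs, where a leaf node id is the Tx
 * queue id. The configuration is only written to hardware by
 * hierarchy_commit().
 */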
void
txgbe_tm_conf_init(struct rte_eth_dev *dev)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);

	/* initialize shaper profile list */
	TAILQ_INIT(&tm_conf->shaper_profile_list);

	/* initialize node configuration */
	tm_conf->root = NULL;
	TAILQ_INIT(&tm_conf->queue_list);
	TAILQ_INIT(&tm_conf->tc_list);
	tm_conf->nb_tc_node = 0;
	tm_conf->nb_queue_node = 0;
	tm_conf->committed = false;
}
void
txgbe_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
	struct txgbe_tm_shaper_profile *shaper_profile;
	struct txgbe_tm_node *tm_node;

	/* clear node configuration */
	while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) {
		TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
		rte_free(tm_node);
	}
	tm_conf->nb_queue_node = 0;
	while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) {
		TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
		rte_free(tm_node);
	}
	tm_conf->nb_tc_node = 0;
	rte_free(tm_conf->root);
	tm_conf->root = NULL;

	/* Remove all shaper profiles */
	while ((shaper_profile =
		TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
		TAILQ_REMOVE(&tm_conf->shaper_profile_list,
			     shaper_profile, node);
		rte_free(shaper_profile);
	}
}
static inline uint8_t
txgbe_tc_nb_get(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *eth_conf;
	uint8_t nb_tcs = 0;

	eth_conf = &dev->data->dev_conf;
	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
		    ETH_32_POOLS)
			nb_tcs = ETH_4_TCS;
		else
			nb_tcs = ETH_8_TCS;
	} else {
		nb_tcs = 1;
	}

	return nb_tcs;
}
static int
txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
			  struct rte_tm_capabilities *cap,
			  struct rte_tm_error *error)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint8_t tc_nb = txgbe_tc_nb_get(dev);

	if (!cap || !error)
		return -EINVAL;

	if (tc_nb > hw->mac.max_tx_queues)
		return -EINVAL;

	error->type = RTE_TM_ERROR_TYPE_NONE;

	/* set all the parameters to 0 first. */
	memset(cap, 0, sizeof(struct rte_tm_capabilities));

	/**
	 * Here is the max capability, not the current configuration.
	 */
	/* port + TCs + queues */
	cap->n_nodes_max = 1 + TXGBE_DCB_TC_MAX +
			   hw->mac.max_tx_queues;
	cap->n_levels_max = 3;
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;
	cap->shaper_n_max = cap->n_nodes_max;
	cap->shaper_private_n_max = cap->n_nodes_max;
	cap->shaper_private_dual_rate_n_max = 0;
	cap->shaper_private_rate_min = 0;
	/* 10Gbps -> 1.25GBps */
	cap->shaper_private_rate_max = 1250000000ull;
	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_n_nodes_per_shaper_max = 0;
	cap->shaper_shared_n_shapers_per_node_max = 0;
	cap->shaper_shared_dual_rate_n_max = 0;
	cap->shaper_shared_rate_min = 0;
	cap->shaper_shared_rate_max = 0;
	cap->sched_n_children_max = hw->mac.max_tx_queues;
	/**
	 * HW supports SP, but there is no plan to support it now.
	 * So, all the nodes should have the same priority.
	 */
	cap->sched_sp_n_priorities_max = 1;
	cap->sched_wfq_n_children_per_group_max = 0;
	cap->sched_wfq_n_groups_max = 0;
	/**
	 * SW only supports fair round robin now.
	 * So, all the nodes should have the same weight.
	 */
	cap->sched_wfq_weight_max = 1;
	cap->cman_head_drop_supported = 0;
	cap->dynamic_update_mask = 0;
	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	cap->cman_wred_context_n_max = 0;
	cap->cman_wred_context_private_n_max = 0;
	cap->cman_wred_context_shared_n_max = 0;
	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;

	return 0;
}
static inline struct txgbe_tm_shaper_profile *
txgbe_shaper_profile_search(struct rte_eth_dev *dev,
			    uint32_t shaper_profile_id)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
	struct txgbe_shaper_profile_list *shaper_profile_list =
		&tm_conf->shaper_profile_list;
	struct txgbe_tm_shaper_profile *shaper_profile;

	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
		if (shaper_profile_id == shaper_profile->shaper_profile_id)
			return shaper_profile;
	}

	return NULL;
}
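
/*
 * Only the peak rate of a shaper profile is used by this driver; the
 * parameter check below rejects committed rate, committed bucket size,
 * peak bucket size and packet length adjustment.
 */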
static int
txgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
				 struct rte_tm_error *error)
{
	/* min rate not supported */
	if (profile->committed.rate) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
		error->message = "committed rate not supported";
		return -EINVAL;
	}
	/* min bucket size not supported */
	if (profile->committed.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
		error->message = "committed bucket size not supported";
		return -EINVAL;
	}
	/* max bucket size not supported */
	if (profile->peak.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
		error->message = "peak bucket size not supported";
		return -EINVAL;
	}
	/* length adjustment not supported */
	if (profile->pkt_length_adjust) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
		error->message = "packet length adjustment not supported";
		return -EINVAL;
	}

	return 0;
}
static int
txgbe_shaper_profile_add(struct rte_eth_dev *dev,
			 uint32_t shaper_profile_id,
			 struct rte_tm_shaper_params *profile,
			 struct rte_tm_error *error)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
	struct txgbe_tm_shaper_profile *shaper_profile;
	int ret;

	if (!profile || !error)
		return -EINVAL;

	ret = txgbe_shaper_profile_param_check(profile, error);
	if (ret)
		return ret;

	shaper_profile = txgbe_shaper_profile_search(dev, shaper_profile_id);

	if (shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID exist";
		return -EINVAL;
	}

	shaper_profile = rte_zmalloc("txgbe_tm_shaper_profile",
				     sizeof(struct txgbe_tm_shaper_profile),
				     0);
	if (!shaper_profile)
		return -ENOMEM;
	shaper_profile->shaper_profile_id = shaper_profile_id;
	rte_memcpy(&shaper_profile->profile, profile,
		   sizeof(struct rte_tm_shaper_params));
	TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
			  shaper_profile, node);

	return 0;
}
static int
txgbe_shaper_profile_del(struct rte_eth_dev *dev,
			 uint32_t shaper_profile_id,
			 struct rte_tm_error *error)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
	struct txgbe_tm_shaper_profile *shaper_profile;

	if (!error)
		return -EINVAL;

	shaper_profile = txgbe_shaper_profile_search(dev, shaper_profile_id);

	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID not exist";
		return -EINVAL;
	}

	/* don't delete a profile if it's used by one or several nodes */
	if (shaper_profile->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "profile in use";
		return -EINVAL;
	}

	TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node);
	rte_free(shaper_profile);

	return 0;
}
static inline struct txgbe_tm_node *
txgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
		     enum txgbe_tm_node_type *node_type)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
	struct txgbe_tm_node *tm_node;

	if (tm_conf->root && tm_conf->root->id == node_id) {
		*node_type = TXGBE_TM_NODE_TYPE_PORT;
		return tm_conf->root;
	}

	TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
		if (tm_node->id == node_id) {
			*node_type = TXGBE_TM_NODE_TYPE_TC;
			return tm_node;
		}
	}

	TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
		if (tm_node->id == node_id) {
			*node_type = TXGBE_TM_NODE_TYPE_QUEUE;
			return tm_node;
		}
	}

	return NULL;
}
static void
txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
			uint16_t *base, uint16_t *nb)
{
	uint8_t nb_tcs = txgbe_tc_nb_get(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t vf_num = pci_dev->max_vfs;

			if (vf_num >= ETH_32_POOLS) {
			} else if (vf_num >= ETH_16_POOLS) {
			*base = vf_num * nb_tcs + tc_node_no;
		if (nb_tcs == ETH_8_TCS) {
			switch (tc_node_no) {
			switch (tc_node_no) {
			/**
			 * If no VF and no DCB, only 64 queues can be used.
			 * This case is also covered by "case 0".
			 */
static int
txgbe_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
		       uint32_t priority, uint32_t weight,
		       struct rte_tm_node_params *params,
		       struct rte_tm_error *error)
{
	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	if (priority) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
		error->message = "priority should be 0";
		return -EINVAL;
	}

	if (weight != 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
		error->message = "weight must be 1";
		return -EINVAL;
	}

	/* shared shaper not supported */
	if (params->shared_shaper_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}
	if (params->n_shared_shapers) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	/* for non-leaf node */
	if (node_id >= dev->data->nb_tx_queues) {
		/* check the unsupported parameters */
		if (params->nonleaf.wfq_weight_mode) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ not supported";
			return -EINVAL;
		}
		if (params->nonleaf.n_sp_priorities != 1) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
			error->message = "SP priority not supported";
			return -EINVAL;
		} else if (params->nonleaf.wfq_weight_mode &&
			   !(*params->nonleaf.wfq_weight_mode)) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ should be byte mode";
			return -EINVAL;
		}

		return 0;
	}

	/* for leaf node */
	/* check the unsupported parameters */
	if (params->leaf.cman) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
		error->message = "Congestion management not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.wred_profile_id !=
	    RTE_TM_WRED_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.shared_wred_context_id) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.n_shared_wred_contexts) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	return 0;
}

/**
 * Now the TC and queue configuration is controlled by DCB.
 * We need to check if the node configuration follows the DCB configuration.
 * In the future, we may use TM to cover DCB.
 */
static int
txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	       uint32_t parent_node_id, uint32_t priority,
	       uint32_t weight, uint32_t level_id,
	       struct rte_tm_node_params *params,
	       struct rte_tm_error *error)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
	enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
	enum txgbe_tm_node_type parent_node_type = TXGBE_TM_NODE_TYPE_MAX;
	struct txgbe_tm_shaper_profile *shaper_profile = NULL;
	struct txgbe_tm_node *tm_node;
	struct txgbe_tm_node *parent_node;
	uint8_t nb_tcs;
	uint16_t q_base = 0;
	uint16_t q_nb = 0;
	int ret;

	if (!params || !error)
		return -EINVAL;

	/* if already committed */
	if (tm_conf->committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	ret = txgbe_node_param_check(dev, node_id, priority, weight,
				     params, error);
	if (ret)
		return ret;

	/* check if the node ID is already used */
	if (txgbe_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	/* check the shaper profile id */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		shaper_profile = txgbe_shaper_profile_search(dev,
					params->shaper_profile_id);
		if (!shaper_profile) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
			error->message = "shaper profile not exist";
			return -EINVAL;
		}
	}

	/* root node if it does not have a parent */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check level */
		if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
		    level_id > TXGBE_TM_NODE_TYPE_PORT) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "Wrong level";
			return -EINVAL;
		}

		/* obviously no more than one root */
		if (tm_conf->root) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "already have a root";
			return -EINVAL;
		}

		/* add the root node */
		tm_node = rte_zmalloc("txgbe_tm_node",
				      sizeof(struct txgbe_tm_node),
				      0);
		if (!tm_node)
			return -ENOMEM;
		tm_node->id = node_id;
		tm_node->priority = priority;
		tm_node->weight = weight;
		tm_node->reference_count = 0;
		tm_node->no = 0;
		tm_node->parent = NULL;
		tm_node->shaper_profile = shaper_profile;
		rte_memcpy(&tm_node->params, params,
			   sizeof(struct rte_tm_node_params));
		tm_conf->root = tm_node;

		/* increase the reference counter of the shaper profile */
		if (shaper_profile)
			shaper_profile->reference_count++;

		return 0;
	}

	/* TC or queue node */
	/* check the parent node */
	parent_node = txgbe_tm_node_search(dev, parent_node_id,
					   &parent_node_type);
	if (!parent_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent not exist";
		return -EINVAL;
	}
	if (parent_node_type != TXGBE_TM_NODE_TYPE_PORT &&
	    parent_node_type != TXGBE_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}
	/* check level */
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != parent_node_type + 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "Wrong level";
		return -EINVAL;
	}

	/* check the node number */
	if (parent_node_type == TXGBE_TM_NODE_TYPE_PORT) {
		/* check TC number */
		nb_tcs = txgbe_tc_nb_get(dev);
		if (tm_conf->nb_tc_node >= nb_tcs) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many TCs";
			return -EINVAL;
		}
	} else {
		/* check queue number */
		if (tm_conf->nb_queue_node >= dev->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues";
			return -EINVAL;
		}

		txgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb);
		if (parent_node->reference_count >= q_nb) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues than TC supported";
			return -EINVAL;
		}

		/**
		 * For queue, the node id means queue id.
		 */
		if (node_id >= dev->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too large queue id";
			return -EINVAL;
		}
	}

	/* add the TC or queue node */
	tm_node = rte_zmalloc("txgbe_tm_node",
			      sizeof(struct txgbe_tm_node),
			      0);
	if (!tm_node)
		return -ENOMEM;
	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	tm_node->shaper_profile = shaper_profile;
	rte_memcpy(&tm_node->params, params,
		   sizeof(struct rte_tm_node_params));
	if (parent_node_type == TXGBE_TM_NODE_TYPE_PORT) {
		tm_node->no = parent_node->reference_count;
		TAILQ_INSERT_TAIL(&tm_conf->tc_list,
				  tm_node, node);
		tm_conf->nb_tc_node++;
	} else {
		tm_node->no = q_base + parent_node->reference_count;
		TAILQ_INSERT_TAIL(&tm_conf->queue_list,
				  tm_node, node);
		tm_conf->nb_queue_node++;
	}
	tm_node->parent->reference_count++;

	/* increase the reference counter of the shaper profile */
	if (shaper_profile)
		shaper_profile->reference_count++;

	return 0;
}
static int
txgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		  struct rte_tm_error *error)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
	enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
	struct txgbe_tm_node *tm_node;

	if (!error)
		return -EINVAL;

	/* if already committed */
	if (tm_conf->committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	/* the node should have no child */
	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message =
			"cannot delete a node which has children";
		return -EINVAL;
	}

	/* root node */
	if (node_type == TXGBE_TM_NODE_TYPE_PORT) {
		if (tm_node->shaper_profile)
			tm_node->shaper_profile->reference_count--;
		rte_free(tm_node);
		tm_conf->root = NULL;
		return 0;
	}

	/* TC or queue node */
	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count--;
	tm_node->parent->reference_count--;
	if (node_type == TXGBE_TM_NODE_TYPE_TC) {
		TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
		tm_conf->nb_tc_node--;
	} else {
		TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
		tm_conf->nb_queue_node--;
	}
	rte_free(tm_node);

	return 0;
}
static int
txgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
		    int *is_leaf, struct rte_tm_error *error)
{
	enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
	struct txgbe_tm_node *tm_node;

	if (!is_leaf || !error)
		return -EINVAL;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (node_type == TXGBE_TM_NODE_TYPE_QUEUE)
		*is_leaf = true;
	else
		*is_leaf = false;

	return 0;
}
static int
txgbe_level_capabilities_get(struct rte_eth_dev *dev,
			     uint32_t level_id,
			     struct rte_tm_level_capabilities *cap,
			     struct rte_tm_error *error)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (!cap || !error)
		return -EINVAL;

	if (level_id >= TXGBE_TM_NODE_TYPE_MAX) {
		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
		error->message = "too deep level";
		return -EINVAL;
	}

	/* root node */
	if (level_id == TXGBE_TM_NODE_TYPE_PORT) {
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->n_nodes_leaf_max = 0;
	} else if (level_id == TXGBE_TM_NODE_TYPE_TC) {
		/* TC */
		cap->n_nodes_max = TXGBE_DCB_TC_MAX;
		cap->n_nodes_nonleaf_max = TXGBE_DCB_TC_MAX;
		cap->n_nodes_leaf_max = 0;
	} else {
		/* queue */
		cap->n_nodes_max = hw->mac.max_tx_queues;
		cap->n_nodes_nonleaf_max = 0;
		cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
	}

	cap->non_leaf_nodes_identical = true;
	cap->leaf_nodes_identical = true;

	if (level_id != TXGBE_TM_NODE_TYPE_QUEUE) {
		cap->nonleaf.shaper_private_supported = true;
		cap->nonleaf.shaper_private_dual_rate_supported = false;
		cap->nonleaf.shaper_private_rate_min = 0;
		/* 10Gbps -> 1.25GBps */
		cap->nonleaf.shaper_private_rate_max = 1250000000ull;
		cap->nonleaf.shaper_shared_n_max = 0;
		if (level_id == TXGBE_TM_NODE_TYPE_PORT)
			cap->nonleaf.sched_n_children_max =
				TXGBE_DCB_TC_MAX;
		else
			cap->nonleaf.sched_n_children_max =
				hw->mac.max_tx_queues;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 1;
		cap->nonleaf.stats_mask = 0;

		return 0;
	}

	/* queue node */
	cap->leaf.shaper_private_supported = true;
	cap->leaf.shaper_private_dual_rate_supported = false;
	cap->leaf.shaper_private_rate_min = 0;
	/* 10Gbps -> 1.25GBps */
	cap->leaf.shaper_private_rate_max = 1250000000ull;
	cap->leaf.shaper_shared_n_max = 0;
	cap->leaf.cman_head_drop_supported = false;
	cap->leaf.cman_wred_context_private_supported = true;
	cap->leaf.cman_wred_context_shared_n_max = 0;
	cap->leaf.stats_mask = 0;

	return 0;
}
static int
txgbe_node_capabilities_get(struct rte_eth_dev *dev,
			    uint32_t node_id,
			    struct rte_tm_node_capabilities *cap,
			    struct rte_tm_error *error)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
	struct txgbe_tm_node *tm_node;

	if (!cap || !error)
		return -EINVAL;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	cap->shaper_private_supported = true;
	cap->shaper_private_dual_rate_supported = false;
	cap->shaper_private_rate_min = 0;
	/* 10Gbps -> 1.25GBps */
	cap->shaper_private_rate_max = 1250000000ull;
	cap->shaper_shared_n_max = 0;

	if (node_type == TXGBE_TM_NODE_TYPE_QUEUE) {
		cap->leaf.cman_head_drop_supported = false;
		cap->leaf.cman_wred_context_private_supported = true;
		cap->leaf.cman_wred_context_shared_n_max = 0;
	} else {
		if (node_type == TXGBE_TM_NODE_TYPE_PORT)
			cap->nonleaf.sched_n_children_max =
				TXGBE_DCB_TC_MAX;
		else
			cap->nonleaf.sched_n_children_max =
				hw->mac.max_tx_queues;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 1;
	}

	return 0;
}
static int
txgbe_hierarchy_commit(struct rte_eth_dev *dev,
		       int clear_on_fail,
		       struct rte_tm_error *error)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
	struct txgbe_tm_node *tm_node;
	uint64_t bw;
	int ret;

	if (!error)
		return -EINVAL;

	/* check the setting */
	if (!tm_conf->root)
		goto done;

	/* port max bandwidth not supported yet */
	if (tm_conf->root->shaper_profile &&
	    tm_conf->root->shaper_profile->profile.peak.rate) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "no port max bandwidth";
		goto fail_clear;
	}

	/* HW does not support TC max bandwidth */
	TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
		if (tm_node->shaper_profile &&
		    tm_node->shaper_profile->profile.peak.rate) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
			error->message = "no TC max bandwidth";
			goto fail_clear;
		}
	}

	/* queue max bandwidth */
	TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
		if (tm_node->shaper_profile)
			bw = tm_node->shaper_profile->profile.peak.rate;
		else
			bw = 0;
		if (bw) {
			/* convert Bps to Mbps */
			bw = bw * 8 / 1000 / 1000;
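			/*
			 * e.g. a peak rate of 1250000000 Bps (10 Gbit/s)
			 * becomes 1250000000 * 8 / 1000 / 1000 = 10000,
			 * the Mbps value passed to the rate limit below.
			 */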
			ret = txgbe_set_queue_rate_limit(dev, tm_node->no, bw);
			if (ret) {
				error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
				error->message =
					"failed to set queue max bandwidth";
				goto fail_clear;
			}
		}
	}

done:
	tm_conf->committed = true;
	return 0;

fail_clear:
	/* clear all the traffic manager configuration */
	if (clear_on_fail) {
		txgbe_tm_conf_uninit(dev);
		txgbe_tm_conf_init(dev);
	}
	return -EINVAL;
}