/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_malloc.h>

#include "ixgbe_ethdev.h"

static int ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
				     struct rte_tm_capabilities *cap,
				     struct rte_tm_error *error);
static int ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
				    uint32_t shaper_profile_id,
				    struct rte_tm_shaper_params *profile,
				    struct rte_tm_error *error);
static int ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
				    uint32_t shaper_profile_id,
				    struct rte_tm_error *error);
static int ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
			  uint32_t parent_node_id, uint32_t priority,
			  uint32_t weight, uint32_t level_id,
			  struct rte_tm_node_params *params,
			  struct rte_tm_error *error);
static int ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
			     struct rte_tm_error *error);
static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
			       int *is_leaf, struct rte_tm_error *error);
static int ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
					uint32_t level_id,
					struct rte_tm_level_capabilities *cap,
					struct rte_tm_error *error);
static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
				       uint32_t node_id,
				       struct rte_tm_node_capabilities *cap,
				       struct rte_tm_error *error);

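/**
 * Generic traffic management ops for this driver, handed to the
 * ethdev layer via ixgbe_tm_ops_get() below and reached by
 * applications through the rte_tm_* API (e.g. rte_tm_node_add()).
 */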
const struct rte_tm_ops ixgbe_tm_ops = {
	.capabilities_get = ixgbe_tm_capabilities_get,
	.shaper_profile_add = ixgbe_shaper_profile_add,
	.shaper_profile_delete = ixgbe_shaper_profile_del,
	.node_add = ixgbe_node_add,
	.node_delete = ixgbe_node_delete,
	.node_type_get = ixgbe_node_type_get,
	.level_capabilities_get = ixgbe_level_capabilities_get,
	.node_capabilities_get = ixgbe_node_capabilities_get,
};

int
ixgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
		 void *arg)
{
	if (!arg)
		return -EINVAL;

	*(const void **)arg = &ixgbe_tm_ops;

	return 0;
}

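/**
 * Reset the per-port TM state: empty shaper profile and node lists,
 * no root node, nothing committed. Expected to be called once during
 * device initialization (the caller lives in ixgbe_ethdev.c).
 */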
void
ixgbe_tm_conf_init(struct rte_eth_dev *dev)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);

	/* initialize shaper profile list */
	TAILQ_INIT(&tm_conf->shaper_profile_list);

	/* initialize node configuration */
	tm_conf->root = NULL;
	TAILQ_INIT(&tm_conf->queue_list);
	TAILQ_INIT(&tm_conf->tc_list);
	tm_conf->nb_tc_node = 0;
	tm_conf->nb_queue_node = 0;
	tm_conf->committed = false;
}

void
ixgbe_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_tm_shaper_profile *shaper_profile;
	struct ixgbe_tm_node *tm_node;

	/* clear node configuration */
	while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) {
		TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
		rte_free(tm_node);
	}
	tm_conf->nb_queue_node = 0;
	while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) {
		TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
		rte_free(tm_node);
	}
	tm_conf->nb_tc_node = 0;
	if (tm_conf->root) {
		rte_free(tm_conf->root);
		tm_conf->root = NULL;
	}

	/* Remove all shaper profiles */
	while ((shaper_profile =
	       TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
		TAILQ_REMOVE(&tm_conf->shaper_profile_list,
			     shaper_profile, node);
		rte_free(shaper_profile);
	}
}

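/**
 * Derive the TC count from the TX multi-queue mode: DCB uses the
 * configured nb_tcs, VMDQ+DCB infers it from the pool count
 * (32 pools -> 4 TCs, otherwise 8), anything else means one TC.
 */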
static inline uint8_t
ixgbe_tc_nb_get(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *eth_conf;
	uint8_t nb_tcs = 0;

	eth_conf = &dev->data->dev_conf;
	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
		    ETH_32_POOLS)
			nb_tcs = ETH_4_TCS;
		else
			nb_tcs = ETH_8_TCS;
	} else {
		nb_tcs = 1;
	}

	return nb_tcs;
}

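/**
 * Report the maximum capabilities, not the current configuration:
 * a three-level tree (port/TC/queue), private single-rate shapers
 * only, no SP/WFQ scheduling and no WRED.
 */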
static int
ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
			  struct rte_tm_capabilities *cap,
			  struct rte_tm_error *error)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint8_t tc_nb = ixgbe_tc_nb_get(dev);

	if (!cap || !error)
		return -EINVAL;

	if (tc_nb > hw->mac.max_tx_queues)
		return -EINVAL;

	error->type = RTE_TM_ERROR_TYPE_NONE;

	/* set all the parameters to 0 first. */
	memset(cap, 0, sizeof(struct rte_tm_capabilities));

	/**
	 * Here is the max capability, not the current configuration.
	 */
	/* port + TCs + queues */
	cap->n_nodes_max = 1 + IXGBE_DCB_MAX_TRAFFIC_CLASS +
			   hw->mac.max_tx_queues;
	cap->n_levels_max = 3;
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;
	cap->shaper_n_max = cap->n_nodes_max;
	cap->shaper_private_n_max = cap->n_nodes_max;
	cap->shaper_private_dual_rate_n_max = 0;
	cap->shaper_private_rate_min = 0;
	/* 10Gbps -> 1.25GBps */
	cap->shaper_private_rate_max = 1250000000ull;
	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_n_nodes_per_shaper_max = 0;
	cap->shaper_shared_n_shapers_per_node_max = 0;
	cap->shaper_shared_dual_rate_n_max = 0;
	cap->shaper_shared_rate_min = 0;
	cap->shaper_shared_rate_max = 0;
	cap->sched_n_children_max = hw->mac.max_tx_queues;
	/**
	 * HW supports SP, but there is no plan to support it now.
	 * So, all the nodes should have the same priority.
	 */
	cap->sched_sp_n_priorities_max = 1;
	cap->sched_wfq_n_children_per_group_max = 0;
	cap->sched_wfq_n_groups_max = 0;
	/**
	 * SW only supports fair round robin now.
	 * So, all the nodes should have the same weight.
	 */
	cap->sched_wfq_weight_max = 1;
	cap->cman_head_drop_supported = 0;
	cap->dynamic_update_mask = 0;
	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	cap->cman_wred_context_n_max = 0;
	cap->cman_wred_context_private_n_max = 0;
	cap->cman_wred_context_shared_n_max = 0;
	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
	cap->stats_mask = 0;

	return 0;
}

static inline struct ixgbe_tm_shaper_profile *
ixgbe_shaper_profile_search(struct rte_eth_dev *dev,
			    uint32_t shaper_profile_id)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_shaper_profile_list *shaper_profile_list =
		&tm_conf->shaper_profile_list;
	struct ixgbe_tm_shaper_profile *shaper_profile;

	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
		if (shaper_profile_id == shaper_profile->shaper_profile_id)
			return shaper_profile;
	}

	return NULL;
}

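/**
 * Only the peak rate of a profile is honoured by this driver;
 * committed rate/size, peak bucket size and packet length adjustment
 * are all rejected here.
 */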
static int
ixgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
				 struct rte_tm_error *error)
{
	/* min rate not supported */
	if (profile->committed.rate) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
		error->message = "committed rate not supported";
		return -EINVAL;
	}
	/* min bucket size not supported */
	if (profile->committed.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
		error->message = "committed bucket size not supported";
		return -EINVAL;
	}
	/* max bucket size not supported */
	if (profile->peak.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
		error->message = "peak bucket size not supported";
		return -EINVAL;
	}
	/* length adjustment not supported */
	if (profile->pkt_length_adjust) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
		error->message = "packet length adjustment not supported";
		return -EINVAL;
	}

	return 0;
}

static int
ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
			 uint32_t shaper_profile_id,
			 struct rte_tm_shaper_params *profile,
			 struct rte_tm_error *error)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_tm_shaper_profile *shaper_profile;
	int ret;

	if (!profile || !error)
		return -EINVAL;

	ret = ixgbe_shaper_profile_param_check(profile, error);
	if (ret)
		return ret;

	shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);

	if (shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID already exists";
		return -EINVAL;
	}

	shaper_profile = rte_zmalloc("ixgbe_tm_shaper_profile",
				     sizeof(struct ixgbe_tm_shaper_profile),
				     0);
	if (!shaper_profile)
		return -ENOMEM;
	shaper_profile->shaper_profile_id = shaper_profile_id;
	(void)rte_memcpy(&shaper_profile->profile, profile,
			 sizeof(struct rte_tm_shaper_params));
	TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
			  shaper_profile, node);

	return 0;
}

static int
ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
			 uint32_t shaper_profile_id,
			 struct rte_tm_error *error)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_tm_shaper_profile *shaper_profile;

	if (!error)
		return -EINVAL;

	shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);

	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID does not exist";
		return -EINVAL;
	}

	/* don't delete a profile if it's used by one or several nodes */
	if (shaper_profile->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "profile in use";
		return -EINVAL;
	}

	TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node);
	rte_free(shaper_profile);

	return 0;
}

static inline struct ixgbe_tm_node *
ixgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
		     enum ixgbe_tm_node_type *node_type)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	struct ixgbe_tm_node *tm_node;

	if (tm_conf->root && tm_conf->root->id == node_id) {
		*node_type = IXGBE_TM_NODE_TYPE_PORT;
		return tm_conf->root;
	}

	TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
		if (tm_node->id == node_id) {
			*node_type = IXGBE_TM_NODE_TYPE_TC;
			return tm_node;
		}
	}

	TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
		if (tm_node->id == node_id) {
			*node_type = IXGBE_TM_NODE_TYPE_QUEUE;
			return tm_node;
		}
	}

	return NULL;
}

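/**
 * Return the first queue id (base) and the number of queues (nb)
 * owned by the given TC, following the fixed 82599 TX queue layout
 * for the configured VT/DCB mode.
 */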
static void
ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
			uint16_t *base, uint16_t *nb)
{
	uint8_t nb_tcs = ixgbe_tc_nb_get(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t vf_num = pci_dev->max_vfs;

	*base = 0;
	*nb = 0;

	/* VT on */
	if (vf_num) {
		/* no DCB */
		if (nb_tcs == 1) {
			if (vf_num >= ETH_32_POOLS) {
				*nb = 2;
				*base = vf_num * 2;
			} else if (vf_num >= ETH_16_POOLS) {
				*nb = 4;
				*base = vf_num * 4;
			} else {
				*nb = 8;
				*base = vf_num * 8;
			}
		} else {
			/* DCB */
			*nb = 1;
			*base = vf_num * nb_tcs + tc_node_no;
		}
	} else {
		/* VT off */
		if (nb_tcs == ETH_8_TCS) {
			/* 8 TCs: 32/32/16/16/8/8/8/8 queues per TC */
			static const uint16_t base8[] = {
				0, 32, 64, 80, 96, 104, 112, 120};
			static const uint16_t nb8[] = {
				32, 32, 16, 16, 8, 8, 8, 8};

			if (tc_node_no < RTE_DIM(base8)) {
				*base = base8[tc_node_no];
				*nb = nb8[tc_node_no];
			}
		} else {
			/**
			 * 4 TCs: 64/32/16/16 queues per TC.
			 * If no VF and no DCB, only 64 queues can be
			 * used; that case is also covered by entry 0.
			 */
			static const uint16_t base4[] = {0, 64, 96, 112};
			static const uint16_t nb4[] = {64, 32, 16, 16};

			if (tc_node_no < RTE_DIM(base4)) {
				*base = base4[tc_node_no];
				*nb = nb4[tc_node_no];
			}
		}
	}
}

static int
ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id,
		       uint32_t priority, uint32_t weight,
		       struct rte_tm_node_params *params,
		       struct rte_tm_error *error)
{
	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	if (priority) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
		error->message = "priority should be 0";
		return -EINVAL;
	}

	if (weight != 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
		error->message = "weight must be 1";
		return -EINVAL;
	}

	/* not support shared shaper */
	if (params->shared_shaper_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}
	if (params->n_shared_shapers) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	/* for non-leaf node */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check the unsupported parameters */
		if (params->nonleaf.wfq_weight_mode) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ not supported";
			return -EINVAL;
		}
		if (params->nonleaf.n_sp_priorities != 1) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
			error->message = "SP priority not supported";
			return -EINVAL;
		} else if (params->nonleaf.wfq_weight_mode &&
			   !(*params->nonleaf.wfq_weight_mode)) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ weight mode should be byte mode";
			return -EINVAL;
		}

		return 0;
	}

	/* for TC or queue node */
	/* check the unsupported parameters */
	if (params->leaf.cman) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
		error->message = "Congestion management not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.wred_profile_id !=
	    RTE_TM_WRED_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.shared_wred_context_id) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.n_shared_wred_contexts) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	return 0;
}

/**
 * Now the TC and queue configuration is controlled by DCB.
 * We need to check whether the node configuration follows
 * the DCB configuration.
 * In the future, we may use TM to cover DCB.
 */
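/*
 * The hierarchy accepted here has exactly three levels:
 *
 *	port (root; at most one, any unused node id)
 *	    TC nodes (children of the root, one per enabled TC)
 *	        queue nodes (children of a TC; node id == TX queue id)
 */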
static int
ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	       uint32_t parent_node_id, uint32_t priority,
	       uint32_t weight, uint32_t level_id,
	       struct rte_tm_node_params *params,
	       struct rte_tm_error *error)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
	enum ixgbe_tm_node_type parent_node_type = IXGBE_TM_NODE_TYPE_MAX;
	struct ixgbe_tm_shaper_profile *shaper_profile;
	struct ixgbe_tm_node *tm_node;
	struct ixgbe_tm_node *parent_node;
	uint8_t nb_tcs;
	uint16_t q_base = 0;
	uint16_t q_nb = 0;
	int ret;

	if (!params || !error)
		return -EINVAL;

	/* if already committed */
	if (tm_conf->committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	ret = ixgbe_node_param_check(node_id, parent_node_id, priority, weight,
				     params, error);
	if (ret)
		return ret;

	/* check if the node ID is already used */
	if (ixgbe_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	/* check the shaper profile id */
	shaper_profile = ixgbe_shaper_profile_search(dev,
						     params->shaper_profile_id);
	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
		error->message = "shaper profile does not exist";
		return -EINVAL;
	}

	/* the node is the root if it doesn't have a parent */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check level */
		if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
		    level_id > IXGBE_TM_NODE_TYPE_PORT) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "Wrong level";
			return -EINVAL;
		}

		/* obviously no more than one root */
		if (tm_conf->root) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "already have a root";
			return -EINVAL;
		}

		/* add the root node */
		tm_node = rte_zmalloc("ixgbe_tm_node",
				      sizeof(struct ixgbe_tm_node),
				      0);
		if (!tm_node)
			return -ENOMEM;
		tm_node->id = node_id;
		tm_node->priority = priority;
		tm_node->weight = weight;
		tm_node->reference_count = 0;
		tm_node->no = 0;
		tm_node->parent = NULL;
		tm_node->shaper_profile = shaper_profile;
		(void)rte_memcpy(&tm_node->params, params,
				 sizeof(struct rte_tm_node_params));
		tm_conf->root = tm_node;

		/* increase the reference counter of the shaper profile */
		shaper_profile->reference_count++;

		return 0;
	}

	/* TC or queue node */
	/* check the parent node */
	parent_node = ixgbe_tm_node_search(dev, parent_node_id,
					   &parent_node_type);
	if (!parent_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent does not exist";
		return -EINVAL;
	}
	if (parent_node_type != IXGBE_TM_NODE_TYPE_PORT &&
	    parent_node_type != IXGBE_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}
	/* check level */
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != parent_node_type + 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "Wrong level";
		return -EINVAL;
	}

	/* check the node number */
	if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
		/* check TC number */
		nb_tcs = ixgbe_tc_nb_get(dev);
		if (tm_conf->nb_tc_node >= nb_tcs) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many TCs";
			return -EINVAL;
		}
	} else {
		/* check queue number */
		if (tm_conf->nb_queue_node >= dev->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues";
			return -EINVAL;
		}

		ixgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb);
		if (parent_node->reference_count >= q_nb) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "more queues than the TC supports";
			return -EINVAL;
		}

		/**
		 * check the node id.
		 * For a queue node, the node id means the queue id.
		 */
		if (node_id >= dev->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too large queue id";
			return -EINVAL;
		}
	}

	/* add the TC or queue node */
	tm_node = rte_zmalloc("ixgbe_tm_node",
			      sizeof(struct ixgbe_tm_node),
			      0);
	if (!tm_node)
		return -ENOMEM;
	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	tm_node->shaper_profile = shaper_profile;
	(void)rte_memcpy(&tm_node->params, params,
			 sizeof(struct rte_tm_node_params));
	if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
		tm_node->no = parent_node->reference_count;
		TAILQ_INSERT_TAIL(&tm_conf->tc_list,
				  tm_node, node);
		tm_conf->nb_tc_node++;
	} else {
		tm_node->no = q_base + parent_node->reference_count;
		TAILQ_INSERT_TAIL(&tm_conf->queue_list,
				  tm_node, node);
		tm_conf->nb_queue_node++;
	}
	tm_node->parent->reference_count++;

	/* increase the reference counter of the shaper profile */
	shaper_profile->reference_count++;

	return 0;
}

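/**
 * A node can be removed only before the hierarchy is committed and
 * only while it has no children; the references it holds on its
 * shaper profile and parent are dropped.
 */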
static int
ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		  struct rte_tm_error *error)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
	struct ixgbe_tm_node *tm_node;

	if (!error)
		return -EINVAL;

	/* if already committed */
	if (tm_conf->committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	/* the node should have no child */
	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message =
			"cannot delete a node which has children";
		return -EINVAL;
	}

	/* root node */
	if (node_type == IXGBE_TM_NODE_TYPE_PORT) {
		tm_node->shaper_profile->reference_count--;
		rte_free(tm_node);
		tm_conf->root = NULL;
		return 0;
	}

	/* TC or queue node */
	tm_node->shaper_profile->reference_count--;
	tm_node->parent->reference_count--;
	if (node_type == IXGBE_TM_NODE_TYPE_TC) {
		TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
		tm_conf->nb_tc_node--;
	} else {
		TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
		tm_conf->nb_queue_node--;
	}
	rte_free(tm_node);

	return 0;
}

static int
ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
		    int *is_leaf, struct rte_tm_error *error)
{
	enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
	struct ixgbe_tm_node *tm_node;

	if (!is_leaf || !error)
		return -EINVAL;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (node_type == IXGBE_TM_NODE_TYPE_QUEUE)
		*is_leaf = true;
	else
		*is_leaf = false;

	return 0;
}

static int
ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
			     uint32_t level_id,
			     struct rte_tm_level_capabilities *cap,
			     struct rte_tm_error *error)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!cap || !error)
		return -EINVAL;

	if (level_id >= IXGBE_TM_NODE_TYPE_MAX) {
		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
		error->message = "too deep level";
		return -EINVAL;
	}

	/* root node */
	if (level_id == IXGBE_TM_NODE_TYPE_PORT) {
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->n_nodes_leaf_max = 0;
		cap->non_leaf_nodes_identical = true;
		cap->leaf_nodes_identical = true;
		cap->nonleaf.shaper_private_supported = true;
		cap->nonleaf.shaper_private_dual_rate_supported = false;
		cap->nonleaf.shaper_private_rate_min = 0;
		/* 10Gbps -> 1.25GBps */
		cap->nonleaf.shaper_private_rate_max = 1250000000ull;
		cap->nonleaf.shaper_shared_n_max = 0;
		cap->nonleaf.sched_n_children_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 1;
		cap->nonleaf.stats_mask = 0;

		return 0;
	}

	/* TC or queue node */
	if (level_id == IXGBE_TM_NODE_TYPE_TC) {
		/* TC */
		cap->n_nodes_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
		cap->n_nodes_nonleaf_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
		cap->n_nodes_leaf_max = 0;
		cap->non_leaf_nodes_identical = true;
	} else {
		/* queue */
		cap->n_nodes_max = hw->mac.max_tx_queues;
		cap->n_nodes_nonleaf_max = 0;
		cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
		cap->non_leaf_nodes_identical = true;
	}
	cap->leaf_nodes_identical = true;
	cap->leaf.shaper_private_supported = true;
	cap->leaf.shaper_private_dual_rate_supported = false;
	cap->leaf.shaper_private_rate_min = 0;
	/* 10Gbps -> 1.25GBps */
	cap->leaf.shaper_private_rate_max = 1250000000ull;
	cap->leaf.shaper_shared_n_max = 0;
	cap->leaf.cman_head_drop_supported = false;
	cap->leaf.cman_wred_context_private_supported = true;
	cap->leaf.cman_wred_context_shared_n_max = 0;
	cap->leaf.stats_mask = 0;

	return 0;
}

static int
ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
			    uint32_t node_id,
			    struct rte_tm_node_capabilities *cap,
			    struct rte_tm_error *error)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
	struct ixgbe_tm_node *tm_node;

	if (!cap || !error)
		return -EINVAL;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	cap->shaper_private_supported = true;
	cap->shaper_private_dual_rate_supported = false;
	cap->shaper_private_rate_min = 0;
	/* 10Gbps -> 1.25GBps */
	cap->shaper_private_rate_max = 1250000000ull;
	cap->shaper_shared_n_max = 0;

	if (node_type == IXGBE_TM_NODE_TYPE_QUEUE) {
		cap->leaf.cman_head_drop_supported = false;
		cap->leaf.cman_wred_context_private_supported = true;
		cap->leaf.cman_wred_context_shared_n_max = 0;
	} else {
		if (node_type == IXGBE_TM_NODE_TYPE_PORT)
			cap->nonleaf.sched_n_children_max =
				IXGBE_DCB_MAX_TRAFFIC_CLASS;
		else
			cap->nonleaf.sched_n_children_max =
				hw->mac.max_tx_queues;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 1;
	}

	cap->stats_mask = 0;

	return 0;
}
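
/*
 * Application-side usage (a minimal sketch, not part of this file;
 * node ids 1000/1001 and profile id 0 are arbitrary examples and
 * error handling is omitted):
 *
 *	struct rte_tm_error err;
 *	struct rte_tm_shaper_params sp = {
 *		.peak = { .rate = 1250000000 },
 *	};
 *	struct rte_tm_node_params root_np = {
 *		.shaper_profile_id = 0,
 *		.nonleaf = { .n_sp_priorities = 1 },
 *	};
 *	struct rte_tm_node_params tc_np = {
 *		.shaper_profile_id = 0,
 *		.leaf = { .wred = {
 *			.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE } },
 *	};
 *
 *	rte_tm_shaper_profile_add(port_id, 0, &sp, &err);
 *	rte_tm_node_add(port_id, 1000, RTE_TM_NODE_ID_NULL, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &root_np, &err);
 *	rte_tm_node_add(port_id, 1001, 1000, 0, 1,
 *			RTE_TM_NODE_LEVEL_ID_ANY, &tc_np, &err);
 */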