/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <rte_malloc.h>

#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
static int i40e_tm_capabilities_get(struct rte_eth_dev *dev,
				    struct rte_tm_capabilities *cap,
				    struct rte_tm_error *error);
static int i40e_shaper_profile_add(struct rte_eth_dev *dev,
				   uint32_t shaper_profile_id,
				   struct rte_tm_shaper_params *profile,
				   struct rte_tm_error *error);
static int i40e_shaper_profile_del(struct rte_eth_dev *dev,
				   uint32_t shaper_profile_id,
				   struct rte_tm_error *error);
static int i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
			 uint32_t parent_node_id, uint32_t priority,
			 uint32_t weight, uint32_t level_id,
			 struct rte_tm_node_params *params,
			 struct rte_tm_error *error);
static int i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
			    struct rte_tm_error *error);
static int i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
			      int *is_leaf, struct rte_tm_error *error);
static int i40e_level_capabilities_get(struct rte_eth_dev *dev,
				       uint32_t level_id,
				       struct rte_tm_level_capabilities *cap,
				       struct rte_tm_error *error);
static int i40e_node_capabilities_get(struct rte_eth_dev *dev,
				      uint32_t node_id,
				      struct rte_tm_node_capabilities *cap,
				      struct rte_tm_error *error);
static int i40e_hierarchy_commit(struct rte_eth_dev *dev,
				 int clear_on_fail,
				 struct rte_tm_error *error);
const struct rte_tm_ops i40e_tm_ops = {
	.capabilities_get = i40e_tm_capabilities_get,
	.shaper_profile_add = i40e_shaper_profile_add,
	.shaper_profile_delete = i40e_shaper_profile_del,
	.node_add = i40e_node_add,
	.node_delete = i40e_node_delete,
	.node_type_get = i40e_node_type_get,
	.level_capabilities_get = i40e_level_capabilities_get,
	.node_capabilities_get = i40e_node_capabilities_get,
	.hierarchy_commit = i40e_hierarchy_commit,
};

int
i40e_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
		void *arg)
{
	if (!arg)
		return -EINVAL;

	*(const void **)arg = &i40e_tm_ops;

	return 0;
}
void
i40e_tm_conf_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	/* initialize shaper profile list */
	TAILQ_INIT(&pf->tm_conf.shaper_profile_list);

	/* initialize node configuration */
	pf->tm_conf.root = NULL;
	TAILQ_INIT(&pf->tm_conf.tc_list);
	TAILQ_INIT(&pf->tm_conf.queue_list);
	pf->tm_conf.nb_tc_node = 0;
	pf->tm_conf.nb_queue_node = 0;
	pf->tm_conf.committed = false;
}
void
i40e_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_shaper_profile *shaper_profile;
	struct i40e_tm_node *tm_node;

	/* clear node configuration */
	while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
		TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
		rte_free(tm_node);
	}
	pf->tm_conf.nb_queue_node = 0;
	while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
		TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
		rte_free(tm_node);
	}
	pf->tm_conf.nb_tc_node = 0;
	if (pf->tm_conf.root) {
		rte_free(pf->tm_conf.root);
		pf->tm_conf.root = NULL;
	}

	/* Remove all shaper profiles */
	while ((shaper_profile =
	       TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
		TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
			     shaper_profile, node);
		rte_free(shaper_profile);
	}
}
static inline uint16_t
i40e_tc_nb_get(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *main_vsi = pf->main_vsi;
	uint16_t sum = 0;
	int i;

	/* count the TCs enabled on the main VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (main_vsi->enabled_tc & BIT_ULL(i))
			sum++;
	}

	return sum;
}

static int
i40e_tm_capabilities_get(struct rte_eth_dev *dev,
			 struct rte_tm_capabilities *cap,
			 struct rte_tm_error *error)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t tc_nb = i40e_tc_nb_get(dev);

	if (!cap || !error)
		return -EINVAL;

	if (tc_nb > hw->func_caps.num_tx_qp)
		return -EINVAL;

	error->type = RTE_TM_ERROR_TYPE_NONE;

	/* set all the parameters to 0 first. */
	memset(cap, 0, sizeof(struct rte_tm_capabilities));

	/**
	 * support port + TCs + queues
	 * here shows the max capability not the current configuration.
	 */
	cap->n_nodes_max = 1 + I40E_MAX_TRAFFIC_CLASS + hw->func_caps.num_tx_qp;
	cap->n_levels_max = 3; /* port, TC, queue */
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;
	cap->shaper_n_max = cap->n_nodes_max;
	cap->shaper_private_n_max = cap->n_nodes_max;
	cap->shaper_private_dual_rate_n_max = 0;
	cap->shaper_private_rate_min = 0;
	/* 40Gbps -> 5GBps */
	cap->shaper_private_rate_max = 5000000000ull;
	cap->shaper_private_packet_mode_supported = 0;
	cap->shaper_private_byte_mode_supported = 1;
	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_n_nodes_per_shaper_max = 0;
	cap->shaper_shared_n_shapers_per_node_max = 0;
	cap->shaper_shared_dual_rate_n_max = 0;
	cap->shaper_shared_rate_min = 0;
	cap->shaper_shared_rate_max = 0;
	cap->shaper_shared_packet_mode_supported = 0;
	cap->shaper_shared_byte_mode_supported = 0;
	cap->sched_n_children_max = hw->func_caps.num_tx_qp;
	/**
	 * HW supports SP. But no plan to support it now.
	 * So, all the nodes should have the same priority.
	 */
	cap->sched_sp_n_priorities_max = 1;
	cap->sched_wfq_n_children_per_group_max = 0;
	cap->sched_wfq_n_groups_max = 0;
	/**
	 * SW only supports fair round robin now.
	 * So, all the nodes should have the same weight.
	 */
	cap->sched_wfq_weight_max = 1;
	cap->sched_wfq_packet_mode_supported = 0;
	cap->sched_wfq_byte_mode_supported = 0;
	cap->cman_head_drop_supported = 0;
	cap->dynamic_update_mask = 0;
	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	cap->cman_wred_context_n_max = 0;
	cap->cman_wred_context_private_n_max = 0;
	cap->cman_wred_context_shared_n_max = 0;
	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
	cap->stats_mask = 0;

	return 0;
}
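/*
 * Note: rte_tm shaper rates are expressed in bytes per second, so the
 * 5000000000ull ceiling above corresponds to the 40 Gbit/s line rate
 * (40 Gbit/s / 8 = 5 GByte/s). Only byte-mode, single-rate (peak) shaping
 * is reported as supported.
 */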
static inline struct i40e_tm_shaper_profile *
i40e_shaper_profile_search(struct rte_eth_dev *dev,
			   uint32_t shaper_profile_id)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_shaper_profile_list *shaper_profile_list =
		&pf->tm_conf.shaper_profile_list;
	struct i40e_tm_shaper_profile *shaper_profile;

	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
		if (shaper_profile_id == shaper_profile->shaper_profile_id)
			return shaper_profile;
	}

	return NULL;
}
static int
i40e_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
				struct rte_tm_error *error)
{
	/* min rate not supported */
	if (profile->committed.rate) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
		error->message = "committed rate not supported";
		return -EINVAL;
	}
	/* min bucket size not supported */
	if (profile->committed.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
		error->message = "committed bucket size not supported";
		return -EINVAL;
	}
	/* max bucket size not supported */
	if (profile->peak.size) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
		error->message = "peak bucket size not supported";
		return -EINVAL;
	}
	/* length adjustment not supported */
	if (profile->pkt_length_adjust) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
		error->message = "packet length adjustment not supported";
		return -EINVAL;
	}

	return 0;
}
static int
i40e_shaper_profile_add(struct rte_eth_dev *dev,
			uint32_t shaper_profile_id,
			struct rte_tm_shaper_params *profile,
			struct rte_tm_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_shaper_profile *shaper_profile;
	int ret;

	if (!profile || !error)
		return -EINVAL;

	ret = i40e_shaper_profile_param_check(profile, error);
	if (ret)
		return ret;

	shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);
	if (shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID exist";
		return -EINVAL;
	}

	shaper_profile = rte_zmalloc("i40e_tm_shaper_profile",
				     sizeof(struct i40e_tm_shaper_profile),
				     0);
	if (!shaper_profile)
		return -ENOMEM;
	shaper_profile->shaper_profile_id = shaper_profile_id;
	rte_memcpy(&shaper_profile->profile, profile,
		   sizeof(struct rte_tm_shaper_params));
	TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
			  shaper_profile, node);

	return 0;
}
static int
i40e_shaper_profile_del(struct rte_eth_dev *dev,
			uint32_t shaper_profile_id,
			struct rte_tm_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_shaper_profile *shaper_profile;

	if (!error)
		return -EINVAL;

	shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);
	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
		error->message = "profile ID not exist";
		return -EINVAL;
	}

	/* don't delete a profile if it's used by one or several nodes */
	if (shaper_profile->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "profile in use";
		return -EINVAL;
	}

	TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
	rte_free(shaper_profile);

	return 0;
}
static inline struct i40e_tm_node *
i40e_tm_node_search(struct rte_eth_dev *dev,
		    uint32_t node_id, enum i40e_tm_node_type *node_type)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_node_list *queue_list = &pf->tm_conf.queue_list;
	struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct i40e_tm_node *tm_node;

	if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
		*node_type = I40E_TM_NODE_TYPE_PORT;
		return pf->tm_conf.root;
	}

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->id == node_id) {
			*node_type = I40E_TM_NODE_TYPE_TC;
			return tm_node;
		}
	}

	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->id == node_id) {
			*node_type = I40E_TM_NODE_TYPE_QUEUE;
			return tm_node;
		}
	}

	return NULL;
}
static int
i40e_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
		      uint32_t priority, uint32_t weight,
		      struct rte_tm_node_params *params,
		      struct rte_tm_error *error)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}
	if (priority) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
		error->message = "priority should be 0";
		return -EINVAL;
	}
	if (weight != 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
		error->message = "weight must be 1";
		return -EINVAL;
	}

	/* not support shared shaper */
	if (params->shared_shaper_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}
	if (params->n_shared_shapers) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	/* for non-leaf node */
	if (node_id >= hw->func_caps.num_tx_qp) {
		if (params->nonleaf.wfq_weight_mode) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ not supported";
			return -EINVAL;
		}
		if (params->nonleaf.n_sp_priorities != 1) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
			error->message = "SP priority not supported";
			return -EINVAL;
		} else if (params->nonleaf.wfq_weight_mode &&
			   !(*params->nonleaf.wfq_weight_mode)) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ should be byte mode";
			return -EINVAL;
		}

		return 0;
	}

	/* for leaf node */
	if (params->leaf.cman) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
		error->message = "Congestion management not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.wred_profile_id !=
	    RTE_TM_WRED_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.shared_wred_context_id) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.n_shared_wred_contexts) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	return 0;
}
/**
 * Now the TC and queue configuration is controlled by DCB.
 * We need to check whether the node configuration follows the DCB
 * configuration. In the future, we may use TM to cover DCB.
 */
static int
i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	      uint32_t parent_node_id, uint32_t priority,
	      uint32_t weight, uint32_t level_id,
	      struct rte_tm_node_params *params,
	      struct rte_tm_error *error)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
	enum i40e_tm_node_type parent_node_type = I40E_TM_NODE_TYPE_MAX;
	struct i40e_tm_shaper_profile *shaper_profile = NULL;
	struct i40e_tm_node *tm_node;
	struct i40e_tm_node *parent_node;
	uint16_t tc_nb = 0;
	int ret;

	if (!params || !error)
		return -EINVAL;

	/* if already committed */
	if (pf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	ret = i40e_node_param_check(dev, node_id, priority, weight,
				    params, error);
	if (ret)
		return ret;

	/* check if the node ID is already used */
	if (i40e_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	/* check the shaper profile id */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		shaper_profile = i40e_shaper_profile_search(
					dev, params->shaper_profile_id);
		if (!shaper_profile) {
			error->type =
			     RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
			error->message = "shaper profile not exist";
			return -EINVAL;
		}
	}
	/* root node if it has no parent */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check level */
		if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
		    level_id > I40E_TM_NODE_TYPE_PORT) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "Wrong level";
			return -EINVAL;
		}

		/* obviously no more than one root */
		if (pf->tm_conf.root) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "already have a root";
			return -EINVAL;
		}

		/* add the root node */
		tm_node = rte_zmalloc("i40e_tm_node",
				      sizeof(struct i40e_tm_node),
				      0);
		if (!tm_node)
			return -ENOMEM;
		tm_node->id = node_id;
		tm_node->priority = priority;
		tm_node->weight = weight;
		tm_node->reference_count = 0;
		tm_node->parent = NULL;
		tm_node->shaper_profile = shaper_profile;
		rte_memcpy(&tm_node->params, params,
			   sizeof(struct rte_tm_node_params));
		pf->tm_conf.root = tm_node;

		/* increase the reference counter of the shaper profile */
		if (shaper_profile)
			shaper_profile->reference_count++;

		return 0;
	}
	/* TC or queue node */
	/* check the parent node */
	parent_node = i40e_tm_node_search(dev, parent_node_id,
					  &parent_node_type);
	if (!parent_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent not exist";
		return -EINVAL;
	}
	if (parent_node_type != I40E_TM_NODE_TYPE_PORT &&
	    parent_node_type != I40E_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}
	/* check level */
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != parent_node_type + 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "Wrong level";
		return -EINVAL;
	}

	/* check the node number */
	if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
		/* check the TC number */
		tc_nb = i40e_tc_nb_get(dev);
		if (pf->tm_conf.nb_tc_node >= tc_nb) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many TCs";
			return -EINVAL;
		}
	} else {
		/* check the queue number */
		if (pf->tm_conf.nb_queue_node >= hw->func_caps.num_tx_qp) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues";
			return -EINVAL;
		}

		/**
		 * check the node id.
		 * For queue, the node id means queue id.
		 */
		if (node_id >= hw->func_caps.num_tx_qp) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too large queue id";
			return -EINVAL;
		}
	}

	/* add the TC or queue node */
	tm_node = rte_zmalloc("i40e_tm_node",
			      sizeof(struct i40e_tm_node),
			      0);
	if (!tm_node)
		return -ENOMEM;
	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	tm_node->shaper_profile = shaper_profile;
	rte_memcpy(&tm_node->params, params,
		   sizeof(struct rte_tm_node_params));
	if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
		TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list,
				  tm_node, node);
		pf->tm_conf.nb_tc_node++;
	} else {
		TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list,
				  tm_node, node);
		pf->tm_conf.nb_queue_node++;
	}
	tm_node->parent->reference_count++;

	/* increase the reference counter of the shaper profile */
	if (shaper_profile)
		shaper_profile->reference_count++;

	return 0;
}
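/*
 * Note: a typical configuration sequence driven through the rte_tm API is,
 * e.g., rte_tm_shaper_profile_add() (optional), then rte_tm_node_add() for
 * the port (root) node, the TC nodes and finally the queue nodes, followed by
 * rte_tm_hierarchy_commit(). Nodes can only be added or deleted before the
 * hierarchy is committed.
 */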
static int
i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		 struct rte_tm_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
	struct i40e_tm_node *tm_node;

	if (!error)
		return -EINVAL;

	/* if already committed */
	if (pf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = i40e_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	/* the node should have no child */
	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message =
			"cannot delete a node which has children";
		return -EINVAL;
	}

	/* root node */
	if (node_type == I40E_TM_NODE_TYPE_PORT) {
		if (tm_node->shaper_profile)
			tm_node->shaper_profile->reference_count--;
		rte_free(tm_node);
		pf->tm_conf.root = NULL;
		return 0;
	}

	/* TC or queue node */
	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count--;
	tm_node->parent->reference_count--;
	if (node_type == I40E_TM_NODE_TYPE_TC) {
		TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
		pf->tm_conf.nb_tc_node--;
	} else {
		TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
		pf->tm_conf.nb_queue_node--;
	}
	rte_free(tm_node);

	return 0;
}
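/*
 * Note: reference_count tracks the number of children attached to a node, so
 * a hierarchy has to be torn down bottom-up: delete the queue nodes first,
 * then the TC nodes, and the root last.
 */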
static int
i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
		   int *is_leaf, struct rte_tm_error *error)
{
	enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
	struct i40e_tm_node *tm_node;

	if (!is_leaf || !error)
		return -EINVAL;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = i40e_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (node_type == I40E_TM_NODE_TYPE_QUEUE)
		*is_leaf = true;
	else
		*is_leaf = false;

	return 0;
}

static int
i40e_level_capabilities_get(struct rte_eth_dev *dev,
			    uint32_t level_id,
			    struct rte_tm_level_capabilities *cap,
			    struct rte_tm_error *error)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!cap || !error)
		return -EINVAL;

	if (level_id >= I40E_TM_NODE_TYPE_MAX) {
		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
		error->message = "too deep level";
		return -EINVAL;
	}

	/* root node */
	if (level_id == I40E_TM_NODE_TYPE_PORT) {
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->n_nodes_leaf_max = 0;
	} else if (level_id == I40E_TM_NODE_TYPE_TC) {
		/* TC */
		cap->n_nodes_max = I40E_MAX_TRAFFIC_CLASS;
		cap->n_nodes_nonleaf_max = I40E_MAX_TRAFFIC_CLASS;
		cap->n_nodes_leaf_max = 0;
	} else {
		/* queue */
		cap->n_nodes_max = hw->func_caps.num_tx_qp;
		cap->n_nodes_nonleaf_max = 0;
		cap->n_nodes_leaf_max = hw->func_caps.num_tx_qp;
	}

	cap->non_leaf_nodes_identical = true;
	cap->leaf_nodes_identical = true;

	if (level_id != I40E_TM_NODE_TYPE_QUEUE) {
		cap->nonleaf.shaper_private_supported = true;
		cap->nonleaf.shaper_private_dual_rate_supported = false;
		cap->nonleaf.shaper_private_rate_min = 0;
		/* 40Gbps -> 5GBps */
		cap->nonleaf.shaper_private_rate_max = 5000000000ull;
		cap->nonleaf.shaper_private_packet_mode_supported = 0;
		cap->nonleaf.shaper_private_byte_mode_supported = 1;
		cap->nonleaf.shaper_shared_n_max = 0;
		cap->nonleaf.shaper_shared_packet_mode_supported = 0;
		cap->nonleaf.shaper_shared_byte_mode_supported = 0;
		if (level_id == I40E_TM_NODE_TYPE_PORT)
			cap->nonleaf.sched_n_children_max =
				I40E_MAX_TRAFFIC_CLASS;
		else
			cap->nonleaf.sched_n_children_max =
				hw->func_caps.num_tx_qp;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 1;
		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
		cap->nonleaf.sched_wfq_byte_mode_supported = 0;
		cap->nonleaf.stats_mask = 0;

		return 0;
	}

	/* queue node */
	cap->leaf.shaper_private_supported = true;
	cap->leaf.shaper_private_dual_rate_supported = false;
	cap->leaf.shaper_private_rate_min = 0;
	/* 40Gbps -> 5GBps */
	cap->leaf.shaper_private_rate_max = 5000000000ull;
	cap->leaf.shaper_private_packet_mode_supported = 0;
	cap->leaf.shaper_private_byte_mode_supported = 1;
	cap->leaf.shaper_shared_n_max = 0;
	cap->leaf.shaper_shared_packet_mode_supported = 0;
	cap->leaf.shaper_shared_byte_mode_supported = 0;
	cap->leaf.cman_head_drop_supported = false;
	cap->leaf.cman_wred_context_private_supported = true;
	cap->leaf.cman_wred_context_shared_n_max = 0;
	cap->leaf.stats_mask = 0;

	return 0;
}
static int
i40e_node_capabilities_get(struct rte_eth_dev *dev,
			   uint32_t node_id,
			   struct rte_tm_node_capabilities *cap,
			   struct rte_tm_error *error)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum i40e_tm_node_type node_type;
	struct i40e_tm_node *tm_node;

	if (!cap || !error)
		return -EINVAL;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = i40e_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	cap->shaper_private_supported = true;
	cap->shaper_private_dual_rate_supported = false;
	cap->shaper_private_rate_min = 0;
	/* 40Gbps -> 5GBps */
	cap->shaper_private_rate_max = 5000000000ull;
	cap->shaper_private_packet_mode_supported = 0;
	cap->shaper_private_byte_mode_supported = 1;
	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_packet_mode_supported = 0;
	cap->shaper_shared_byte_mode_supported = 0;

	if (node_type == I40E_TM_NODE_TYPE_QUEUE) {
		cap->leaf.cman_head_drop_supported = false;
		cap->leaf.cman_wred_context_private_supported = true;
		cap->leaf.cman_wred_context_shared_n_max = 0;
	} else {
		if (node_type == I40E_TM_NODE_TYPE_PORT)
			cap->nonleaf.sched_n_children_max =
				I40E_MAX_TRAFFIC_CLASS;
		else
			cap->nonleaf.sched_n_children_max =
				hw->func_caps.num_tx_qp;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 1;
		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
		cap->nonleaf.sched_wfq_byte_mode_supported = 0;
	}

	cap->stats_mask = 0;

	return 0;
}
static int
i40e_hierarchy_commit(struct rte_eth_dev *dev,
		      int clear_on_fail,
		      struct rte_tm_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct i40e_tm_node_list *queue_list = &pf->tm_conf.queue_list;
	struct i40e_tm_node *tm_node;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
	uint64_t bw;
	uint8_t tc_map;
	int ret;
	int i;

	if (!error)
		return -EINVAL;

	/* check the setting */
	if (!pf->tm_conf.root)
		goto done;

	vsi = pf->main_vsi;
	hw = I40E_VSI_TO_HW(vsi);

	/**
	 * Don't support bandwidth control for port and TCs in parallel.
	 * If the port has a max bandwidth, the TCs should have none.
	 */
	/* port */
	if (pf->tm_conf.root->shaper_profile)
		bw = pf->tm_conf.root->shaper_profile->profile.peak.rate;
	else
		bw = 0;
	if (bw) {
		/* check if any TC has a max bandwidth */
		TAILQ_FOREACH(tm_node, tc_list, node) {
			if (tm_node->shaper_profile &&
			    tm_node->shaper_profile->profile.peak.rate) {
				error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
				error->message = "no port and TC max bandwidth"
						 " in parallel";
				goto fail_clear;
			}
		}

		/* convert bytes per second into 50 Mbps units */
		bw = bw * 8 / 1000 / 1000 / I40E_QOS_BW_GRANULARITY;

		/* set the max bandwidth */
		ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid,
						  (uint16_t)bw, 0, NULL);
		if (ret) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
			error->message = "fail to set port max bandwidth";
			goto fail_clear;
		}

		goto done;
	}
	/* TC */
	memset(&tc_bw, 0, sizeof(tc_bw));
	tc_bw.tc_valid_bits = vsi->enabled_tc;
	tc_map = vsi->enabled_tc;
	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (!tm_node->reference_count) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "TC without queue assigned";
			goto fail_clear;
		}

		/* map this node to the next enabled TC */
		i = 0;
		while (i < I40E_MAX_TRAFFIC_CLASS && !(tc_map & BIT_ULL(i)))
			i++;
		if (i >= I40E_MAX_TRAFFIC_CLASS) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "cannot find the TC";
			goto fail_clear;
		}
		tc_map &= ~BIT_ULL(i);

		if (tm_node->shaper_profile)
			bw = tm_node->shaper_profile->profile.peak.rate;
		else
			bw = 0;
		if (!bw)
			continue;

		/* convert bytes per second into 50 Mbps units */
		bw = bw * 8 / 1000 / 1000 / I40E_QOS_BW_GRANULARITY;

		tc_bw.tc_bw_credits[i] = rte_cpu_to_le_16((uint16_t)bw);
	}

	/* queue nodes must not carry a shaper of their own */
	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->shaper_profile)
			bw = tm_node->shaper_profile->profile.peak.rate;
		else
			bw = 0;
		if (bw) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "not support queue QoS";
			goto fail_clear;
		}
	}

	ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
	if (ret) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "fail to set TC max bandwidth";
		goto fail_clear;
	}

done:
	pf->tm_conf.committed = true;
	return 0;

fail_clear:
	/* clear all the traffic manager configuration */
	if (clear_on_fail) {
		i40e_tm_conf_uninit(dev);
		i40e_tm_conf_init(dev);
	}
	return -EINVAL;
}