/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 */
4 #include <rte_tm_driver.h>
6 #include "ice_ethdev.h"
9 static int ice_hierarchy_commit(struct rte_eth_dev *dev,
11 __rte_unused struct rte_tm_error *error);
12 static int ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
13 uint32_t parent_node_id, uint32_t priority,
14 uint32_t weight, uint32_t level_id,
15 struct rte_tm_node_params *params,
16 struct rte_tm_error *error);
17 static int ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
18 struct rte_tm_error *error);
19 static int ice_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
20 int *is_leaf, struct rte_tm_error *error);
21 static int ice_shaper_profile_add(struct rte_eth_dev *dev,
22 uint32_t shaper_profile_id,
23 struct rte_tm_shaper_params *profile,
24 struct rte_tm_error *error);
25 static int ice_shaper_profile_del(struct rte_eth_dev *dev,
26 uint32_t shaper_profile_id,
27 struct rte_tm_error *error);
29 const struct rte_tm_ops ice_tm_ops = {
30 .shaper_profile_add = ice_shaper_profile_add,
31 .shaper_profile_delete = ice_shaper_profile_del,
32 .node_add = ice_tm_node_add,
33 .node_delete = ice_tm_node_delete,
34 .node_type_get = ice_node_type_get,
35 .hierarchy_commit = ice_hierarchy_commit,
39 ice_tm_conf_init(struct rte_eth_dev *dev)
41 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
43 /* initialize node configuration */
44 TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
45 pf->tm_conf.root = NULL;
46 TAILQ_INIT(&pf->tm_conf.tc_list);
47 TAILQ_INIT(&pf->tm_conf.vsi_list);
48 TAILQ_INIT(&pf->tm_conf.qgroup_list);
49 TAILQ_INIT(&pf->tm_conf.queue_list);
50 pf->tm_conf.nb_tc_node = 0;
51 pf->tm_conf.nb_vsi_node = 0;
52 pf->tm_conf.nb_qgroup_node = 0;
53 pf->tm_conf.nb_queue_node = 0;
54 pf->tm_conf.committed = false;
58 ice_tm_conf_uninit(struct rte_eth_dev *dev)
60 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
61 struct ice_tm_node *tm_node;
63 /* clear node configuration */
64 while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
65 TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
68 pf->tm_conf.nb_queue_node = 0;
69 while ((tm_node = TAILQ_FIRST(&pf->tm_conf.qgroup_list))) {
70 TAILQ_REMOVE(&pf->tm_conf.qgroup_list, tm_node, node);
73 pf->tm_conf.nb_qgroup_node = 0;
74 while ((tm_node = TAILQ_FIRST(&pf->tm_conf.vsi_list))) {
75 TAILQ_REMOVE(&pf->tm_conf.vsi_list, tm_node, node);
78 pf->tm_conf.nb_vsi_node = 0;
79 while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
80 TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
83 pf->tm_conf.nb_tc_node = 0;
84 if (pf->tm_conf.root) {
85 rte_free(pf->tm_conf.root);
86 pf->tm_conf.root = NULL;
90 static inline struct ice_tm_node *
91 ice_tm_node_search(struct rte_eth_dev *dev,
92 uint32_t node_id, enum ice_tm_node_type *node_type)
94 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
95 struct ice_tm_node_list *tc_list = &pf->tm_conf.tc_list;
96 struct ice_tm_node_list *vsi_list = &pf->tm_conf.vsi_list;
97 struct ice_tm_node_list *qgroup_list = &pf->tm_conf.qgroup_list;
98 struct ice_tm_node_list *queue_list = &pf->tm_conf.queue_list;
99 struct ice_tm_node *tm_node;
101 if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
102 *node_type = ICE_TM_NODE_TYPE_PORT;
103 return pf->tm_conf.root;
106 TAILQ_FOREACH(tm_node, tc_list, node) {
107 if (tm_node->id == node_id) {
108 *node_type = ICE_TM_NODE_TYPE_TC;
113 TAILQ_FOREACH(tm_node, vsi_list, node) {
114 if (tm_node->id == node_id) {
115 *node_type = ICE_TM_NODE_TYPE_VSI;
120 TAILQ_FOREACH(tm_node, qgroup_list, node) {
121 if (tm_node->id == node_id) {
122 *node_type = ICE_TM_NODE_TYPE_QGROUP;
127 TAILQ_FOREACH(tm_node, queue_list, node) {
128 if (tm_node->id == node_id) {
129 *node_type = ICE_TM_NODE_TYPE_QUEUE;
138 ice_node_param_check(struct ice_pf *pf, uint32_t node_id,
139 uint32_t priority, uint32_t weight,
140 struct rte_tm_node_params *params,
141 struct rte_tm_error *error)
143 /* checked all the unsupported parameter */
144 if (node_id == RTE_TM_NODE_ID_NULL) {
145 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
146 error->message = "invalid node id";
151 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
152 error->message = "priority should be less than 8";
156 if (weight > 200 || weight < 1) {
157 error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
158 error->message = "weight must be between 1 and 200";
162 /* not support shared shaper */
163 if (params->shared_shaper_id) {
164 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
165 error->message = "shared shaper not supported";
168 if (params->n_shared_shapers) {
169 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
170 error->message = "shared shaper not supported";
174 /* for non-leaf node */
175 if (node_id >= pf->dev_data->nb_tx_queues) {
176 if (params->nonleaf.wfq_weight_mode) {
178 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
179 error->message = "WFQ not supported";
182 if (params->nonleaf.n_sp_priorities != 1) {
184 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
185 error->message = "SP priority not supported";
187 } else if (params->nonleaf.wfq_weight_mode &&
188 !(*params->nonleaf.wfq_weight_mode)) {
190 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
191 error->message = "WFP should be byte mode";
199 if (params->leaf.cman) {
200 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
201 error->message = "Congestion management not supported";
204 if (params->leaf.wred.wred_profile_id !=
205 RTE_TM_WRED_PROFILE_ID_NONE) {
207 RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
208 error->message = "WRED not supported";
211 if (params->leaf.wred.shared_wred_context_id) {
213 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
214 error->message = "WRED not supported";
217 if (params->leaf.wred.n_shared_wred_contexts) {
219 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
220 error->message = "WRED not supported";
228 ice_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
229 int *is_leaf, struct rte_tm_error *error)
231 enum ice_tm_node_type node_type = ICE_TM_NODE_TYPE_MAX;
232 struct ice_tm_node *tm_node;
234 if (!is_leaf || !error)
237 if (node_id == RTE_TM_NODE_ID_NULL) {
238 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
239 error->message = "invalid node id";
243 /* check if the node id exists */
244 tm_node = ice_tm_node_search(dev, node_id, &node_type);
246 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
247 error->message = "no such node";
251 if (node_type == ICE_TM_NODE_TYPE_QUEUE)
259 static inline struct ice_tm_shaper_profile *
260 ice_shaper_profile_search(struct rte_eth_dev *dev,
261 uint32_t shaper_profile_id)
263 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
264 struct ice_shaper_profile_list *shaper_profile_list =
265 &pf->tm_conf.shaper_profile_list;
266 struct ice_tm_shaper_profile *shaper_profile;
268 TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
269 if (shaper_profile_id == shaper_profile->shaper_profile_id)
270 return shaper_profile;
277 ice_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
278 struct rte_tm_error *error)
280 /* min bucket size not supported */
281 if (profile->committed.size) {
282 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
283 error->message = "committed bucket size not supported";
286 /* max bucket size not supported */
287 if (profile->peak.size) {
288 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
289 error->message = "peak bucket size not supported";
292 /* length adjustment not supported */
293 if (profile->pkt_length_adjust) {
294 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
295 error->message = "packet length adjustment not supported";
303 ice_shaper_profile_add(struct rte_eth_dev *dev,
304 uint32_t shaper_profile_id,
305 struct rte_tm_shaper_params *profile,
306 struct rte_tm_error *error)
308 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
309 struct ice_tm_shaper_profile *shaper_profile;
312 if (!profile || !error)
315 ret = ice_shaper_profile_param_check(profile, error);
319 shaper_profile = ice_shaper_profile_search(dev, shaper_profile_id);
321 if (shaper_profile) {
322 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
323 error->message = "profile ID exist";
327 shaper_profile = rte_zmalloc("ice_tm_shaper_profile",
328 sizeof(struct ice_tm_shaper_profile),
332 shaper_profile->shaper_profile_id = shaper_profile_id;
333 rte_memcpy(&shaper_profile->profile, profile,
334 sizeof(struct rte_tm_shaper_params));
335 TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
336 shaper_profile, node);
342 ice_shaper_profile_del(struct rte_eth_dev *dev,
343 uint32_t shaper_profile_id,
344 struct rte_tm_error *error)
346 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
347 struct ice_tm_shaper_profile *shaper_profile;
352 shaper_profile = ice_shaper_profile_search(dev, shaper_profile_id);
354 if (!shaper_profile) {
355 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
356 error->message = "profile ID not exist";
360 /* don't delete a profile if it's used by one or several nodes */
361 if (shaper_profile->reference_count) {
362 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
363 error->message = "profile in use";
367 TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
368 rte_free(shaper_profile);
374 ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
375 uint32_t parent_node_id, uint32_t priority,
376 uint32_t weight, uint32_t level_id,
377 struct rte_tm_node_params *params,
378 struct rte_tm_error *error)
380 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
381 enum ice_tm_node_type node_type = ICE_TM_NODE_TYPE_MAX;
382 enum ice_tm_node_type parent_node_type = ICE_TM_NODE_TYPE_MAX;
383 struct ice_tm_shaper_profile *shaper_profile = NULL;
384 struct ice_tm_node *tm_node;
385 struct ice_tm_node *parent_node;
390 if (!params || !error)
393 /* if already committed */
394 if (pf->tm_conf.committed) {
395 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
396 error->message = "already committed";
400 ret = ice_node_param_check(pf, node_id, priority, weight,
405 /* check if the node is already existed */
406 if (ice_tm_node_search(dev, node_id, &node_type)) {
407 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
408 error->message = "node id already used";
412 /* check the shaper profile id */
413 if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
414 shaper_profile = ice_shaper_profile_search(dev,
415 params->shaper_profile_id);
416 if (!shaper_profile) {
418 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
419 error->message = "shaper profile not exist";
424 /* root node if not have a parent */
425 if (parent_node_id == RTE_TM_NODE_ID_NULL) {
427 if (level_id != ICE_TM_NODE_TYPE_PORT) {
428 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
429 error->message = "Wrong level";
433 /* obviously no more than one root */
434 if (pf->tm_conf.root) {
435 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
436 error->message = "already have a root";
440 /* add the root node */
441 tm_node = rte_zmalloc("ice_tm_node",
442 sizeof(struct ice_tm_node),
446 tm_node->id = node_id;
447 tm_node->parent = NULL;
448 tm_node->reference_count = 0;
449 tm_node->children = (struct ice_tm_node **)
450 rte_calloc(NULL, 256, (sizeof(struct ice_tm_node *)), 0);
451 rte_memcpy(&tm_node->params, params,
452 sizeof(struct rte_tm_node_params));
453 pf->tm_conf.root = tm_node;
457 /* TC or queue node */
458 /* check the parent node */
459 parent_node = ice_tm_node_search(dev, parent_node_id,
462 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
463 error->message = "parent not exist";
466 if (parent_node_type != ICE_TM_NODE_TYPE_PORT &&
467 parent_node_type != ICE_TM_NODE_TYPE_TC &&
468 parent_node_type != ICE_TM_NODE_TYPE_VSI &&
469 parent_node_type != ICE_TM_NODE_TYPE_QGROUP) {
470 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
471 error->message = "parent is not valid";
475 if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
476 level_id != (uint32_t)parent_node_type + 1) {
477 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
478 error->message = "Wrong level";
482 /* check the node number */
483 if (parent_node_type == ICE_TM_NODE_TYPE_PORT) {
484 /* check the TC number */
485 if (pf->tm_conf.nb_tc_node >= tc_nb) {
486 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
487 error->message = "too many TCs";
490 } else if (parent_node_type == ICE_TM_NODE_TYPE_TC) {
491 /* check the VSI number */
492 if (pf->tm_conf.nb_vsi_node >= vsi_nb) {
493 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
494 error->message = "too many VSIs";
497 } else if (parent_node_type == ICE_TM_NODE_TYPE_VSI) {
498 /* check the queue group number */
499 if (parent_node->reference_count >= pf->dev_data->nb_tx_queues) {
500 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
501 error->message = "too many queue groups";
505 /* check the queue number */
506 if (parent_node->reference_count >= pf->dev_data->nb_tx_queues) {
507 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
508 error->message = "too many queues";
511 if (node_id >= pf->dev_data->nb_tx_queues) {
512 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
513 error->message = "too large queue id";
518 /* add the TC or VSI or queue group or queue node */
519 tm_node = rte_zmalloc("ice_tm_node",
520 sizeof(struct ice_tm_node),
524 tm_node->id = node_id;
525 tm_node->priority = priority;
526 tm_node->weight = weight;
527 tm_node->reference_count = 0;
528 tm_node->parent = parent_node;
529 tm_node->shaper_profile = shaper_profile;
530 tm_node->children = (struct ice_tm_node **)
531 rte_calloc(NULL, 256, (sizeof(struct ice_tm_node *)), 0);
532 tm_node->parent->children[tm_node->parent->reference_count] = tm_node;
534 if (tm_node->priority != 0 && level_id != ICE_TM_NODE_TYPE_QUEUE &&
535 level_id != ICE_TM_NODE_TYPE_QGROUP)
536 PMD_DRV_LOG(WARNING, "priority != 0 not supported in level %d",
539 if (tm_node->weight != 1 && level_id != ICE_TM_NODE_TYPE_QUEUE)
540 PMD_DRV_LOG(WARNING, "weight != 1 not supported in level %d",
543 rte_memcpy(&tm_node->params, params,
544 sizeof(struct rte_tm_node_params));
545 if (parent_node_type == ICE_TM_NODE_TYPE_PORT) {
546 TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list,
548 tm_node->tc = pf->tm_conf.nb_tc_node;
549 pf->tm_conf.nb_tc_node++;
550 } else if (parent_node_type == ICE_TM_NODE_TYPE_TC) {
551 TAILQ_INSERT_TAIL(&pf->tm_conf.vsi_list,
553 tm_node->tc = parent_node->tc;
554 pf->tm_conf.nb_vsi_node++;
555 } else if (parent_node_type == ICE_TM_NODE_TYPE_VSI) {
556 TAILQ_INSERT_TAIL(&pf->tm_conf.qgroup_list,
558 tm_node->tc = parent_node->parent->tc;
559 pf->tm_conf.nb_qgroup_node++;
561 TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list,
563 tm_node->tc = parent_node->parent->parent->tc;
564 pf->tm_conf.nb_queue_node++;
566 tm_node->parent->reference_count++;
572 ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
573 struct rte_tm_error *error)
575 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
576 enum ice_tm_node_type node_type = ICE_TM_NODE_TYPE_MAX;
577 struct ice_tm_node *tm_node;
582 /* if already committed */
583 if (pf->tm_conf.committed) {
584 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
585 error->message = "already committed";
589 if (node_id == RTE_TM_NODE_ID_NULL) {
590 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
591 error->message = "invalid node id";
595 /* check if the node id exists */
596 tm_node = ice_tm_node_search(dev, node_id, &node_type);
598 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
599 error->message = "no such node";
603 /* the node should have no child */
604 if (tm_node->reference_count) {
605 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
607 "cannot delete a node which has children";
612 if (node_type == ICE_TM_NODE_TYPE_PORT) {
614 pf->tm_conf.root = NULL;
618 /* TC or VSI or queue group or queue node */
619 tm_node->parent->reference_count--;
620 if (node_type == ICE_TM_NODE_TYPE_TC) {
621 TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
622 pf->tm_conf.nb_tc_node--;
623 } else if (node_type == ICE_TM_NODE_TYPE_VSI) {
624 TAILQ_REMOVE(&pf->tm_conf.vsi_list, tm_node, node);
625 pf->tm_conf.nb_vsi_node--;
626 } else if (node_type == ICE_TM_NODE_TYPE_QGROUP) {
627 TAILQ_REMOVE(&pf->tm_conf.qgroup_list, tm_node, node);
628 pf->tm_conf.nb_qgroup_node--;
630 TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
631 pf->tm_conf.nb_queue_node--;
638 static int ice_move_recfg_lan_txq(struct rte_eth_dev *dev,
639 struct ice_sched_node *queue_sched_node,
640 struct ice_sched_node *dst_node,
643 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
644 struct ice_aqc_move_txqs_data *buf;
645 struct ice_sched_node *queue_parent_node;
647 int ret = ICE_SUCCESS;
648 uint16_t buf_size = ice_struct_size(buf, txqs, 1);
650 buf = (struct ice_aqc_move_txqs_data *)ice_malloc(hw, sizeof(*buf));
652 queue_parent_node = queue_sched_node->parent;
653 buf->src_teid = queue_parent_node->info.node_teid;
654 buf->dest_teid = dst_node->info.node_teid;
655 buf->txqs[0].q_teid = queue_sched_node->info.node_teid;
656 buf->txqs[0].txq_id = queue_id;
658 ret = ice_aq_move_recfg_lan_txq(hw, 1, true, false, false, false, 50,
659 NULL, buf, buf_size, &txqs_moved, NULL);
660 if (ret || txqs_moved == 0) {
661 PMD_DRV_LOG(ERR, "move lan queue %u failed", queue_id);
662 return ICE_ERR_PARAM;
665 if (queue_parent_node->num_children > 0) {
666 queue_parent_node->num_children--;
667 queue_parent_node->children[queue_parent_node->num_children] = NULL;
669 PMD_DRV_LOG(ERR, "invalid children number %d for queue %u",
670 queue_parent_node->num_children, queue_id);
671 return ICE_ERR_PARAM;
673 dst_node->children[dst_node->num_children++] = queue_sched_node;
674 queue_sched_node->parent = dst_node;
675 ice_sched_query_elem(hw, queue_sched_node->info.node_teid, &queue_sched_node->info);
680 static int ice_hierarchy_commit(struct rte_eth_dev *dev,
682 __rte_unused struct rte_tm_error *error)
684 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
685 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
686 struct ice_tm_node_list *qgroup_list = &pf->tm_conf.qgroup_list;
687 struct ice_tm_node_list *queue_list = &pf->tm_conf.queue_list;
688 struct ice_tm_node *tm_node;
689 struct ice_sched_node *node;
690 struct ice_sched_node *vsi_node;
691 struct ice_sched_node *queue_node;
692 struct ice_tx_queue *txq;
694 int ret_val = ICE_SUCCESS;
698 uint32_t idx_vsi_child;
700 uint32_t nb_vsi_child;
706 for (i = 0; i < dev->data->nb_tx_queues; i++) {
707 ret_val = ice_tx_queue_stop(dev, i);
709 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
710 PMD_DRV_LOG(ERR, "stop queue %u failed", i);
715 node = hw->port_info->root;
716 vsi_layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
717 for (i = 0; i < vsi_layer; i++)
718 node = node->children[0];
720 nb_vsi_child = vsi_node->num_children;
721 nb_qg = vsi_node->children[0]->num_children;
726 TAILQ_FOREACH(tm_node, qgroup_list, node) {
727 struct ice_tm_node *tm_child_node;
728 struct ice_sched_node *qgroup_sched_node =
729 vsi_node->children[idx_vsi_child]->children[idx_qg];
731 for (i = 0; i < tm_node->reference_count; i++) {
732 tm_child_node = tm_node->children[i];
733 qid = tm_child_node->id;
734 ret_val = ice_tx_queue_start(dev, qid);
736 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
737 PMD_DRV_LOG(ERR, "start queue %u failed", qid);
740 txq = dev->data->tx_queues[qid];
741 q_teid = txq->q_teid;
742 queue_node = ice_sched_get_node(hw->port_info, q_teid);
743 if (queue_node == NULL) {
744 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
745 PMD_DRV_LOG(ERR, "get queue %u node failed", qid);
748 if (queue_node->info.parent_teid == qgroup_sched_node->info.node_teid)
750 ret_val = ice_move_recfg_lan_txq(dev, queue_node, qgroup_sched_node, qid);
752 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
753 PMD_DRV_LOG(ERR, "move queue %u failed", qid);
757 if (tm_node->reference_count != 0 && tm_node->shaper_profile) {
758 uint32_t node_teid = qgroup_sched_node->info.node_teid;
759 /* Transfer from Byte per seconds to Kbps */
760 peak = tm_node->shaper_profile->profile.peak.rate;
761 peak = peak / 1000 * BITS_PER_BYTE;
762 ret_val = ice_sched_set_node_bw_lmt_per_tc(hw->port_info,
769 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
771 "configure queue group %u bandwidth failed",
776 priority = 7 - tm_node->priority;
777 ret_val = ice_sched_cfg_sibl_node_prio_lock(hw->port_info, qgroup_sched_node,
780 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
781 PMD_DRV_LOG(ERR, "configure queue group %u priority failed",
786 if (idx_qg >= nb_qg) {
790 if (idx_vsi_child >= nb_vsi_child) {
791 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
792 PMD_DRV_LOG(ERR, "too many queues");
797 TAILQ_FOREACH(tm_node, queue_list, node) {
799 txq = dev->data->tx_queues[qid];
801 q_teid = txq->q_teid;
802 if (tm_node->shaper_profile) {
803 /* Transfer from Byte per seconds to Kbps */
804 peak = tm_node->shaper_profile->profile.peak.rate;
805 peak = peak / 1000 * BITS_PER_BYTE;
806 ret_val = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx,
807 tm_node->tc, tm_node->id,
808 ICE_MAX_BW, (u32)peak);
810 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
812 "configure queue %u bandwidth failed",
817 priority = 7 - tm_node->priority;
818 ret_val = ice_cfg_vsi_q_priority(hw->port_info, 1,
821 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
822 PMD_DRV_LOG(ERR, "configure queue %u priority failed", tm_node->priority);
826 ret_val = ice_cfg_q_bw_alloc(hw->port_info, vsi->idx,
827 tm_node->tc, tm_node->id,
828 ICE_MAX_BW, (u32)tm_node->weight);
830 error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
831 PMD_DRV_LOG(ERR, "configure queue %u weight failed", tm_node->weight);
839 /* clear all the traffic manager configuration */
841 ice_tm_conf_uninit(dev);
842 ice_tm_conf_init(dev);