/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <rte_tm_driver.h>

#include "iavf.h"
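
/*
 * Traffic management (rte_tm) support for the iavf PMD.
 *
 * The driver exposes a fixed three-level hierarchy:
 *   port (root) -> traffic classes (TC) -> Tx queues (leaf nodes)
 *
 * Nodes added through the rte_tm API are only cached in software
 * (vf->tm_conf); the resulting queue-to-TC mapping is sent to the PF
 * over virtchnl when the hierarchy is committed.
 */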
static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
				 __rte_unused int clear_on_fail,
				 __rte_unused struct rte_tm_error *error);
static int iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
			    uint32_t parent_node_id, uint32_t priority,
			    uint32_t weight, uint32_t level_id,
			    struct rte_tm_node_params *params,
			    struct rte_tm_error *error);
static int iavf_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
			       struct rte_tm_error *error);
static int iavf_tm_capabilities_get(struct rte_eth_dev *dev,
				    struct rte_tm_capabilities *cap,
				    struct rte_tm_error *error);
static int iavf_level_capabilities_get(struct rte_eth_dev *dev,
				       uint32_t level_id,
				       struct rte_tm_level_capabilities *cap,
				       struct rte_tm_error *error);
static int iavf_node_capabilities_get(struct rte_eth_dev *dev,
				      uint32_t node_id,
				      struct rte_tm_node_capabilities *cap,
				      struct rte_tm_error *error);
static int iavf_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
			      int *is_leaf, struct rte_tm_error *error);
const struct rte_tm_ops iavf_tm_ops = {
	.node_add = iavf_tm_node_add,
	.node_delete = iavf_tm_node_delete,
	.capabilities_get = iavf_tm_capabilities_get,
	.level_capabilities_get = iavf_level_capabilities_get,
	.node_capabilities_get = iavf_node_capabilities_get,
	.node_type_get = iavf_node_type_get,
	.hierarchy_commit = iavf_hierarchy_commit,
};
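
/*
 * Reset the software TM configuration cache: no root node, empty TC
 * and queue lists, nothing committed yet.
 */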
void
iavf_tm_conf_init(struct rte_eth_dev *dev)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	/* initialize node configuration */
	vf->tm_conf.root = NULL;
	TAILQ_INIT(&vf->tm_conf.tc_list);
	TAILQ_INIT(&vf->tm_conf.queue_list);
	vf->tm_conf.nb_tc_node = 0;
	vf->tm_conf.nb_queue_node = 0;
	vf->tm_conf.committed = false;
}
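
/* Free all cached TM nodes and reset the node counters. */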
void
iavf_tm_conf_uninit(struct rte_eth_dev *dev)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct iavf_tm_node *tm_node;

	/* clear node configuration */
	while ((tm_node = TAILQ_FIRST(&vf->tm_conf.queue_list))) {
		TAILQ_REMOVE(&vf->tm_conf.queue_list, tm_node, node);
		rte_free(tm_node);
	}
	vf->tm_conf.nb_queue_node = 0;
	while ((tm_node = TAILQ_FIRST(&vf->tm_conf.tc_list))) {
		TAILQ_REMOVE(&vf->tm_conf.tc_list, tm_node, node);
		rte_free(tm_node);
	}
	vf->tm_conf.nb_tc_node = 0;
	if (vf->tm_conf.root) {
		rte_free(vf->tm_conf.root);
		vf->tm_conf.root = NULL;
	}
}
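
/*
 * Look up a cached node by id. On a hit, *node_type reports whether
 * the node is the port (root), a TC or a queue node.
 */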
static inline struct iavf_tm_node *
iavf_tm_node_search(struct rte_eth_dev *dev,
		    uint32_t node_id, enum iavf_tm_node_type *node_type)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct iavf_tm_node_list *tc_list = &vf->tm_conf.tc_list;
	struct iavf_tm_node_list *queue_list = &vf->tm_conf.queue_list;
	struct iavf_tm_node *tm_node;

	if (vf->tm_conf.root && vf->tm_conf.root->id == node_id) {
		*node_type = IAVF_TM_NODE_TYPE_PORT;
		return vf->tm_conf.root;
	}

	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (tm_node->id == node_id) {
			*node_type = IAVF_TM_NODE_TYPE_TC;
			return tm_node;
		}
	}

	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->id == node_id) {
			*node_type = IAVF_TM_NODE_TYPE_QUEUE;
			return tm_node;
		}
	}

	return NULL;
}
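
/*
 * Validate the rte_tm node parameters against what the VF can do:
 * no priorities, weight fixed to 1, no shapers, no WFQ, no congestion
 * management and no WRED.
 */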
static int
iavf_node_param_check(struct iavf_info *vf, uint32_t node_id,
		      uint32_t priority, uint32_t weight,
		      struct rte_tm_node_params *params,
		      struct rte_tm_error *error)
{
	/* check all the unsupported parameters */
	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	if (priority) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
		error->message = "priority should be 0";
		return -EINVAL;
	}

	if (weight != 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
		error->message = "weight must be 1";
		return -EINVAL;
	}

	/* shaper profile is not supported */
	if (params->shaper_profile_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
		error->message = "shaper profile not supported";
		return -EINVAL;
	}

	/* shared shaper is not supported */
	if (params->shared_shaper_id) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}
	if (params->n_shared_shapers) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
		error->message = "shared shaper not supported";
		return -EINVAL;
	}

	/* for non-leaf node */
	if (node_id >= vf->num_queue_pairs) {
		if (params->nonleaf.wfq_weight_mode) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ not supported";
			return -EINVAL;
		}
		if (params->nonleaf.n_sp_priorities != 1) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
			error->message = "SP priority not supported";
			return -EINVAL;
		} else if (params->nonleaf.wfq_weight_mode &&
			   !(*params->nonleaf.wfq_weight_mode)) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
			error->message = "WFQ should be byte mode";
			return -EINVAL;
		}

		return 0;
	}

	/* for leaf node */
	if (params->leaf.cman) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
		error->message = "Congestion management not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.wred_profile_id !=
	    RTE_TM_WRED_PROFILE_ID_NONE) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.shared_wred_context_id) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
		error->message = "WRED not supported";
		return -EINVAL;
	}
	if (params->leaf.wred.n_shared_wred_contexts) {
		error->type =
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
		error->message = "WRED not supported";
		return -EINVAL;
	}

	return 0;
}
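
/* rte_tm node_type_get callback: only queue nodes are leaves. */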
static int
iavf_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
		   int *is_leaf, struct rte_tm_error *error)
{
	enum iavf_tm_node_type node_type = IAVF_TM_NODE_TYPE_MAX;
	struct iavf_tm_node *tm_node;

	if (!is_leaf || !error)
		return -EINVAL;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = iavf_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (node_type == IAVF_TM_NODE_TYPE_QUEUE)
		*is_leaf = true;
	else
		*is_leaf = false;

	return 0;
}
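
/*
 * rte_tm node_add callback. Nodes can only be added before the
 * hierarchy is committed. TC nodes must hang off the root and queue
 * nodes off a TC node; the new node is only cached in vf->tm_conf.
 */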
static int
iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
		 uint32_t parent_node_id, uint32_t priority,
		 uint32_t weight, uint32_t level_id,
		 struct rte_tm_node_params *params,
		 struct rte_tm_error *error)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	enum iavf_tm_node_type node_type = IAVF_TM_NODE_TYPE_MAX;
	enum iavf_tm_node_type parent_node_type = IAVF_TM_NODE_TYPE_MAX;
	struct iavf_tm_node *tm_node;
	struct iavf_tm_node *parent_node;
	uint16_t tc_nb = vf->qos_cap->num_elem;
	int ret;

	if (!params || !error)
		return -EINVAL;

	/* if already committed */
	if (vf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	ret = iavf_node_param_check(vf, node_id, priority, weight,
				    params, error);
	if (ret)
		return ret;

	/* check if the node id is already used */
	if (iavf_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	/* root node if it has no parent */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check level */
		if (level_id != IAVF_TM_NODE_TYPE_PORT) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "Wrong level";
			return -EINVAL;
		}

		/* obviously no more than one root */
		if (vf->tm_conf.root) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "already have a root";
			return -EINVAL;
		}

		/* add the root node */
		tm_node = rte_zmalloc("iavf_tm_node",
				      sizeof(struct iavf_tm_node),
				      0);
		if (!tm_node)
			return -ENOMEM;
		tm_node->id = node_id;
		tm_node->parent = NULL;
		tm_node->reference_count = 0;
		rte_memcpy(&tm_node->params, params,
			   sizeof(struct rte_tm_node_params));
		vf->tm_conf.root = tm_node;
		return 0;
	}

	/* TC or queue node */
	/* check the parent node */
	parent_node = iavf_tm_node_search(dev, parent_node_id,
					  &parent_node_type);
	if (!parent_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent not exist";
		return -EINVAL;
	}
	if (parent_node_type != IAVF_TM_NODE_TYPE_PORT &&
	    parent_node_type != IAVF_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not root or TC";
		return -EINVAL;
	}
	/* check level */
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != (uint32_t)parent_node_type + 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "Wrong level";
		return -EINVAL;
	}

	/* check the node number */
	if (parent_node_type == IAVF_TM_NODE_TYPE_PORT) {
		/* check the TC number */
		if (vf->tm_conf.nb_tc_node >= tc_nb) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many TCs";
			return -EINVAL;
		}
	} else {
		/* check the queue number */
		if (parent_node->reference_count >= vf->num_queue_pairs) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues";
			return -EINVAL;
		}
		if (node_id >= vf->num_queue_pairs) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too large queue id";
			return -EINVAL;
		}
	}

	/* add the TC or queue node */
	tm_node = rte_zmalloc("iavf_tm_node",
			      sizeof(struct iavf_tm_node),
			      0);
	if (!tm_node)
		return -ENOMEM;
	tm_node->id = node_id;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	rte_memcpy(&tm_node->params, params,
		   sizeof(struct rte_tm_node_params));
	if (parent_node_type == IAVF_TM_NODE_TYPE_PORT) {
		TAILQ_INSERT_TAIL(&vf->tm_conf.tc_list,
				  tm_node, node);
		tm_node->tc = vf->tm_conf.nb_tc_node;
		vf->tm_conf.nb_tc_node++;
	} else {
		TAILQ_INSERT_TAIL(&vf->tm_conf.queue_list,
				  tm_node, node);
		tm_node->tc = parent_node->tc;
		vf->tm_conf.nb_queue_node++;
	}
	tm_node->parent->reference_count++;

	return 0;
}
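
/*
 * rte_tm node_delete callback. A node can only be removed before the
 * hierarchy is committed and while it has no children.
 */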
static int
iavf_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		    struct rte_tm_error *error)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	enum iavf_tm_node_type node_type = IAVF_TM_NODE_TYPE_MAX;
	struct iavf_tm_node *tm_node;

	if (!error)
		return -EINVAL;

	/* if already committed */
	if (vf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = iavf_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	/* the node should have no children */
	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message =
			"cannot delete a node which has children";
		return -EINVAL;
	}

	/* root node */
	if (node_type == IAVF_TM_NODE_TYPE_PORT) {
		rte_free(tm_node);
		vf->tm_conf.root = NULL;
		return 0;
	}

	/* TC or queue node */
	tm_node->parent->reference_count--;
	if (node_type == IAVF_TM_NODE_TYPE_TC) {
		TAILQ_REMOVE(&vf->tm_conf.tc_list, tm_node, node);
		vf->tm_conf.nb_tc_node--;
	} else {
		TAILQ_REMOVE(&vf->tm_conf.queue_list, tm_node, node);
		vf->tm_conf.nb_queue_node--;
	}
	rte_free(tm_node);

	return 0;
}
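
/*
 * Report the port-level TM capabilities: one root, up to
 * IAVF_MAX_TRAFFIC_CLASS TCs and one leaf node per Tx queue, with
 * private byte-mode shapers only.
 */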
static int
iavf_tm_capabilities_get(struct rte_eth_dev *dev,
			 struct rte_tm_capabilities *cap,
			 struct rte_tm_error *error)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	uint16_t tc_nb = vf->qos_cap->num_elem;

	if (!cap || !error)
		return -EINVAL;

	if (tc_nb > vf->vf_res->num_queue_pairs)
		return -EINVAL;

	error->type = RTE_TM_ERROR_TYPE_NONE;

	/* set all the parameters to 0 first */
	memset(cap, 0, sizeof(struct rte_tm_capabilities));

	/**
	 * support port + TCs + queues
	 * this shows the max capability, not the current configuration.
	 */
	cap->n_nodes_max = 1 + IAVF_MAX_TRAFFIC_CLASS
		+ vf->num_queue_pairs;
	cap->n_levels_max = 3; /* port, TC, queue */
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;
	cap->shaper_n_max = cap->n_nodes_max;
	cap->shaper_private_n_max = cap->n_nodes_max;
	cap->shaper_private_dual_rate_n_max = 0;
	cap->shaper_private_rate_min = 0;
	/* Bytes per second */
	cap->shaper_private_rate_max =
		(uint64_t)vf->link_speed * 1000000 / IAVF_BITS_PER_BYTE;
	cap->shaper_private_packet_mode_supported = 0;
	cap->shaper_private_byte_mode_supported = 1;
	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_n_nodes_per_shaper_max = 0;
	cap->shaper_shared_n_shapers_per_node_max = 0;
	cap->shaper_shared_dual_rate_n_max = 0;
	cap->shaper_shared_rate_min = 0;
	cap->shaper_shared_rate_max = 0;
	cap->shaper_shared_packet_mode_supported = 0;
	cap->shaper_shared_byte_mode_supported = 0;
	cap->sched_n_children_max = vf->num_queue_pairs;
	cap->sched_sp_n_priorities_max = 1;
	cap->sched_wfq_n_children_per_group_max = 0;
	cap->sched_wfq_n_groups_max = 0;
	cap->sched_wfq_weight_max = 1;
	cap->sched_wfq_packet_mode_supported = 0;
	cap->sched_wfq_byte_mode_supported = 0;
	cap->cman_head_drop_supported = 0;
	cap->dynamic_update_mask = 0;
	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	cap->cman_wred_context_n_max = 0;
	cap->cman_wred_context_private_n_max = 0;
	cap->cman_wred_context_shared_n_max = 0;
	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
	cap->stats_mask = 0;

	return 0;
}
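
/*
 * Report per-level capabilities: one port node, up to
 * IAVF_MAX_TRAFFIC_CLASS TC nodes, and one leaf node per queue pair.
 */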
static int
iavf_level_capabilities_get(struct rte_eth_dev *dev,
			    uint32_t level_id,
			    struct rte_tm_level_capabilities *cap,
			    struct rte_tm_error *error)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	if (!cap || !error)
		return -EINVAL;

	if (level_id >= IAVF_TM_NODE_TYPE_MAX) {
		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
		error->message = "too deep level";
		return -EINVAL;
	}

	/* root node */
	if (level_id == IAVF_TM_NODE_TYPE_PORT) {
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->n_nodes_leaf_max = 0;
	} else if (level_id == IAVF_TM_NODE_TYPE_TC) {
		/* TC */
		cap->n_nodes_max = IAVF_MAX_TRAFFIC_CLASS;
		cap->n_nodes_nonleaf_max = IAVF_MAX_TRAFFIC_CLASS;
		cap->n_nodes_leaf_max = 0;
	} else {
		/* queue */
		cap->n_nodes_max = vf->num_queue_pairs;
		cap->n_nodes_nonleaf_max = 0;
		cap->n_nodes_leaf_max = vf->num_queue_pairs;
	}

	cap->non_leaf_nodes_identical = true;
	cap->leaf_nodes_identical = true;

	if (level_id != IAVF_TM_NODE_TYPE_QUEUE) {
		cap->nonleaf.shaper_private_supported = true;
		cap->nonleaf.shaper_private_dual_rate_supported = false;
		cap->nonleaf.shaper_private_rate_min = 0;
		/* Bytes per second */
		cap->nonleaf.shaper_private_rate_max =
			(uint64_t)vf->link_speed * 1000000 / IAVF_BITS_PER_BYTE;
		cap->nonleaf.shaper_private_packet_mode_supported = 0;
		cap->nonleaf.shaper_private_byte_mode_supported = 1;
		cap->nonleaf.shaper_shared_n_max = 0;
		cap->nonleaf.shaper_shared_packet_mode_supported = 0;
		cap->nonleaf.shaper_shared_byte_mode_supported = 0;
		if (level_id == IAVF_TM_NODE_TYPE_PORT)
			cap->nonleaf.sched_n_children_max =
				IAVF_MAX_TRAFFIC_CLASS;
		else
			cap->nonleaf.sched_n_children_max =
				vf->num_queue_pairs;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
		cap->nonleaf.sched_wfq_n_groups_max = 0;
		cap->nonleaf.sched_wfq_weight_max = 1;
		cap->nonleaf.sched_wfq_packet_mode_supported = 0;
		cap->nonleaf.sched_wfq_byte_mode_supported = 0;
		cap->nonleaf.stats_mask = 0;

		return 0;
	}

	/* queue node */
	cap->leaf.shaper_private_supported = false;
	cap->leaf.shaper_private_dual_rate_supported = false;
	cap->leaf.shaper_private_rate_min = 0;
	/* Bytes per second */
	cap->leaf.shaper_private_rate_max =
		(uint64_t)vf->link_speed * 1000000 / IAVF_BITS_PER_BYTE;
	cap->leaf.shaper_private_packet_mode_supported = 0;
	cap->leaf.shaper_private_byte_mode_supported = 1;
	cap->leaf.shaper_shared_n_max = 0;
	cap->leaf.shaper_shared_packet_mode_supported = 0;
	cap->leaf.shaper_shared_byte_mode_supported = 0;
	cap->leaf.cman_head_drop_supported = false;
	cap->leaf.cman_wred_context_private_supported = true;
	cap->leaf.cman_wred_context_shared_n_max = 0;
	cap->leaf.stats_mask = 0;

	return 0;
}
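
/*
 * Report per-node capabilities. Only TC nodes are supported; their
 * shaper limits come from the QoS capabilities advertised by the PF.
 */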
static int
iavf_node_capabilities_get(struct rte_eth_dev *dev,
			   uint32_t node_id,
			   struct rte_tm_node_capabilities *cap,
			   struct rte_tm_error *error)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	enum iavf_tm_node_type node_type;
	struct virtchnl_qos_cap_elem tc_cap;
	struct iavf_tm_node *tm_node;

	if (!cap || !error)
		return -EINVAL;

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = iavf_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	if (node_type != IAVF_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "not support capability get";
		return -EINVAL;
	}

	tc_cap = vf->qos_cap->cap[tm_node->tc];
	if (tc_cap.tc_num != tm_node->tc) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "tc not match";
		return -EINVAL;
	}

	cap->shaper_private_supported = true;
	cap->shaper_private_dual_rate_supported = false;
	/* Bytes per second */
	cap->shaper_private_rate_min =
		(uint64_t)tc_cap.shaper.committed * 1000 / IAVF_BITS_PER_BYTE;
	cap->shaper_private_rate_max =
		(uint64_t)tc_cap.shaper.peak * 1000 / IAVF_BITS_PER_BYTE;
	cap->shaper_shared_n_max = 0;
	cap->nonleaf.sched_n_children_max = vf->num_queue_pairs;
	cap->nonleaf.sched_sp_n_priorities_max = 1;
	cap->nonleaf.sched_wfq_n_children_per_group_max = 1;
	cap->nonleaf.sched_wfq_n_groups_max = 0;
	cap->nonleaf.sched_wfq_weight_max = tc_cap.weight;
	cap->stats_mask = 0;

	return 0;
}
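
/*
 * rte_tm hierarchy_commit callback. Turns the cached queue nodes into
 * a contiguous queue range per TC and sends the mapping to the PF via
 * virtchnl (iavf_set_q_tc_map). Every queue pair owned by the VF must
 * be attached to an enabled TC before the commit succeeds.
 *
 * For example, 8 queue pairs spread over 4 TCs as {4, 2, 1, 1} queue
 * nodes end up with start queue ids {0, 4, 6, 7}.
 */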
static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
				 int clear_on_fail,
				 __rte_unused struct rte_tm_error *error)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct virtchnl_queue_tc_mapping *q_tc_mapping;
	struct iavf_tm_node_list *queue_list = &vf->tm_conf.queue_list;
	struct iavf_tm_node *tm_node;
	struct iavf_qtc_map *qtc_map;
	uint16_t size;
	int index = 0, node_committed = 0;
	int i, ret_val = IAVF_SUCCESS;

	/* check if port is stopped */
	if (adapter->stopped != 1) {
		PMD_DRV_LOG(ERR, "Please stop port first");
		ret_val = IAVF_ERR_NOT_READY;
		goto err;
	}

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)) {
		PMD_DRV_LOG(ERR, "VF queue tc mapping is not supported");
		ret_val = IAVF_NOT_SUPPORTED;
		goto fail_clear;
	}

	/* check if TC nodes have been added for all TCs of the VF */
	if (vf->tm_conf.nb_tc_node != vf->qos_cap->num_elem) {
		PMD_DRV_LOG(ERR, "Does not set VF vsi nodes to all TCs");
		ret_val = IAVF_ERR_PARAM;
		goto fail_clear;
	}

	size = sizeof(*q_tc_mapping) + sizeof(q_tc_mapping->tc[0]) *
		(vf->qos_cap->num_elem - 1);
	q_tc_mapping = rte_zmalloc("q_tc", size, 0);
	if (!q_tc_mapping) {
		ret_val = IAVF_ERR_NO_MEMORY;
		goto fail_clear;
	}

	q_tc_mapping->vsi_id = vf->vsi.vsi_id;
	q_tc_mapping->num_tc = vf->qos_cap->num_elem;
	q_tc_mapping->num_queue_pairs = vf->num_queue_pairs;

	/* count the queue nodes attached to each TC */
	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->tc >= q_tc_mapping->num_tc) {
			PMD_DRV_LOG(ERR, "TC%d is not enabled", tm_node->tc);
			ret_val = IAVF_ERR_PARAM;
			goto fail_clear;
		}
		q_tc_mapping->tc[tm_node->tc].req.queue_count++;
		node_committed++;
	}

	/* All queues allocated to this VF should be mapped */
	if (node_committed < vf->num_queue_pairs) {
		PMD_DRV_LOG(ERR, "queue node is less than allocated queue pairs");
		ret_val = IAVF_ERR_PARAM;
		goto fail_clear;
	}

	/* store the queue TC mapping info */
	qtc_map = rte_zmalloc("qtc_map",
			      sizeof(struct iavf_qtc_map) * q_tc_mapping->num_tc, 0);
	if (!qtc_map)
		return IAVF_ERR_NO_MEMORY;

	/* assign a contiguous queue range to each TC */
	for (i = 0; i < q_tc_mapping->num_tc; i++) {
		q_tc_mapping->tc[i].req.start_queue_id = index;
		index += q_tc_mapping->tc[i].req.queue_count;
		qtc_map[i].tc = i;
		qtc_map[i].start_queue_id =
			q_tc_mapping->tc[i].req.start_queue_id;
		qtc_map[i].queue_count = q_tc_mapping->tc[i].req.queue_count;
	}

	ret_val = iavf_set_q_tc_map(dev, q_tc_mapping, size);
	if (ret_val)
		goto fail_clear;

	vf->qtc_map = qtc_map;
	vf->tm_conf.committed = true;
	return ret_val;

fail_clear:
	/* clear all the traffic manager configuration */
	if (clear_on_fail) {
		iavf_tm_conf_uninit(dev);
		iavf_tm_conf_init(dev);
	}
err:
	return ret_val;
}