net/ice/base: fix build with GCC 12
[dpdk.git] / drivers / net / ice / ice_tm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2022 Intel Corporation
3  */
4 #include <rte_tm_driver.h>
5
6 #include "ice_ethdev.h"
7 #include "ice_rxtx.h"
8
9 static int ice_hierarchy_commit(struct rte_eth_dev *dev,
10                                  int clear_on_fail,
11                                  __rte_unused struct rte_tm_error *error);
12 static int ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
13               uint32_t parent_node_id, uint32_t priority,
14               uint32_t weight, uint32_t level_id,
15               struct rte_tm_node_params *params,
16               struct rte_tm_error *error);
17 static int ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
18                             struct rte_tm_error *error);
19 static int ice_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
20                    int *is_leaf, struct rte_tm_error *error);
21 static int ice_shaper_profile_add(struct rte_eth_dev *dev,
22                         uint32_t shaper_profile_id,
23                         struct rte_tm_shaper_params *profile,
24                         struct rte_tm_error *error);
25 static int ice_shaper_profile_del(struct rte_eth_dev *dev,
26                                    uint32_t shaper_profile_id,
27                                    struct rte_tm_error *error);
28
/* Generic traffic manager (rte_tm) callbacks exposed by the ice PMD.
 * Only shaper profile add/delete, node add/delete, node type query and
 * hierarchy commit are implemented; every other rte_tm op is left NULL
 * (reported as unsupported to the application).
 */
const struct rte_tm_ops ice_tm_ops = {
	.shaper_profile_add = ice_shaper_profile_add,
	.shaper_profile_delete = ice_shaper_profile_del,
	.node_add = ice_tm_node_add,
	.node_delete = ice_tm_node_delete,
	.node_type_get = ice_node_type_get,
	.hierarchy_commit = ice_hierarchy_commit,
};
37
38 void
39 ice_tm_conf_init(struct rte_eth_dev *dev)
40 {
41         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
42
43         /* initialize node configuration */
44         TAILQ_INIT(&pf->tm_conf.shaper_profile_list);
45         pf->tm_conf.root = NULL;
46         TAILQ_INIT(&pf->tm_conf.tc_list);
47         TAILQ_INIT(&pf->tm_conf.vsi_list);
48         TAILQ_INIT(&pf->tm_conf.qgroup_list);
49         TAILQ_INIT(&pf->tm_conf.queue_list);
50         pf->tm_conf.nb_tc_node = 0;
51         pf->tm_conf.nb_vsi_node = 0;
52         pf->tm_conf.nb_qgroup_node = 0;
53         pf->tm_conf.nb_queue_node = 0;
54         pf->tm_conf.committed = false;
55 }
56
57 void
58 ice_tm_conf_uninit(struct rte_eth_dev *dev)
59 {
60         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
61         struct ice_tm_node *tm_node;
62
63         /* clear node configuration */
64         while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
65                 TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
66                 rte_free(tm_node);
67         }
68         pf->tm_conf.nb_queue_node = 0;
69         while ((tm_node = TAILQ_FIRST(&pf->tm_conf.qgroup_list))) {
70                 TAILQ_REMOVE(&pf->tm_conf.qgroup_list, tm_node, node);
71                 rte_free(tm_node);
72         }
73         pf->tm_conf.nb_qgroup_node = 0;
74         while ((tm_node = TAILQ_FIRST(&pf->tm_conf.vsi_list))) {
75                 TAILQ_REMOVE(&pf->tm_conf.vsi_list, tm_node, node);
76                 rte_free(tm_node);
77         }
78         pf->tm_conf.nb_vsi_node = 0;
79         while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
80                 TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
81                 rte_free(tm_node);
82         }
83         pf->tm_conf.nb_tc_node = 0;
84         if (pf->tm_conf.root) {
85                 rte_free(pf->tm_conf.root);
86                 pf->tm_conf.root = NULL;
87         }
88 }
89
90 static inline struct ice_tm_node *
91 ice_tm_node_search(struct rte_eth_dev *dev,
92                     uint32_t node_id, enum ice_tm_node_type *node_type)
93 {
94         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
95         struct ice_tm_node_list *tc_list = &pf->tm_conf.tc_list;
96         struct ice_tm_node_list *vsi_list = &pf->tm_conf.vsi_list;
97         struct ice_tm_node_list *qgroup_list = &pf->tm_conf.qgroup_list;
98         struct ice_tm_node_list *queue_list = &pf->tm_conf.queue_list;
99         struct ice_tm_node *tm_node;
100
101         if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
102                 *node_type = ICE_TM_NODE_TYPE_PORT;
103                 return pf->tm_conf.root;
104         }
105
106         TAILQ_FOREACH(tm_node, tc_list, node) {
107                 if (tm_node->id == node_id) {
108                         *node_type = ICE_TM_NODE_TYPE_TC;
109                         return tm_node;
110                 }
111         }
112
113         TAILQ_FOREACH(tm_node, vsi_list, node) {
114                 if (tm_node->id == node_id) {
115                         *node_type = ICE_TM_NODE_TYPE_VSI;
116                         return tm_node;
117                 }
118         }
119
120         TAILQ_FOREACH(tm_node, qgroup_list, node) {
121                 if (tm_node->id == node_id) {
122                         *node_type = ICE_TM_NODE_TYPE_QGROUP;
123                         return tm_node;
124                 }
125         }
126
127         TAILQ_FOREACH(tm_node, queue_list, node) {
128                 if (tm_node->id == node_id) {
129                         *node_type = ICE_TM_NODE_TYPE_QUEUE;
130                         return tm_node;
131                 }
132         }
133
134         return NULL;
135 }
136
137 static int
138 ice_node_param_check(struct ice_pf *pf, uint32_t node_id,
139                       uint32_t priority, uint32_t weight,
140                       struct rte_tm_node_params *params,
141                       struct rte_tm_error *error)
142 {
143         /* checked all the unsupported parameter */
144         if (node_id == RTE_TM_NODE_ID_NULL) {
145                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
146                 error->message = "invalid node id";
147                 return -EINVAL;
148         }
149
150         if (priority >= 8) {
151                 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
152                 error->message = "priority should be less than 8";
153                 return -EINVAL;
154         }
155
156         if (weight > 200 || weight < 1) {
157                 error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
158                 error->message = "weight must be between 1 and 200";
159                 return -EINVAL;
160         }
161
162         /* not support shared shaper */
163         if (params->shared_shaper_id) {
164                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
165                 error->message = "shared shaper not supported";
166                 return -EINVAL;
167         }
168         if (params->n_shared_shapers) {
169                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
170                 error->message = "shared shaper not supported";
171                 return -EINVAL;
172         }
173
174         /* for non-leaf node */
175         if (node_id >= pf->dev_data->nb_tx_queues) {
176                 if (params->nonleaf.wfq_weight_mode) {
177                         error->type =
178                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
179                         error->message = "WFQ not supported";
180                         return -EINVAL;
181                 }
182                 if (params->nonleaf.n_sp_priorities != 1) {
183                         error->type =
184                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
185                         error->message = "SP priority not supported";
186                         return -EINVAL;
187                 } else if (params->nonleaf.wfq_weight_mode &&
188                            !(*params->nonleaf.wfq_weight_mode)) {
189                         error->type =
190                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
191                         error->message = "WFP should be byte mode";
192                         return -EINVAL;
193                 }
194
195                 return 0;
196         }
197
198         /* for leaf node */
199         if (params->leaf.cman) {
200                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
201                 error->message = "Congestion management not supported";
202                 return -EINVAL;
203         }
204         if (params->leaf.wred.wred_profile_id !=
205             RTE_TM_WRED_PROFILE_ID_NONE) {
206                 error->type =
207                         RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
208                 error->message = "WRED not supported";
209                 return -EINVAL;
210         }
211         if (params->leaf.wred.shared_wred_context_id) {
212                 error->type =
213                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
214                 error->message = "WRED not supported";
215                 return -EINVAL;
216         }
217         if (params->leaf.wred.n_shared_wred_contexts) {
218                 error->type =
219                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
220                 error->message = "WRED not supported";
221                 return -EINVAL;
222         }
223
224         return 0;
225 }
226
227 static int
228 ice_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
229                    int *is_leaf, struct rte_tm_error *error)
230 {
231         enum ice_tm_node_type node_type = ICE_TM_NODE_TYPE_MAX;
232         struct ice_tm_node *tm_node;
233
234         if (!is_leaf || !error)
235                 return -EINVAL;
236
237         if (node_id == RTE_TM_NODE_ID_NULL) {
238                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
239                 error->message = "invalid node id";
240                 return -EINVAL;
241         }
242
243         /* check if the node id exists */
244         tm_node = ice_tm_node_search(dev, node_id, &node_type);
245         if (!tm_node) {
246                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
247                 error->message = "no such node";
248                 return -EINVAL;
249         }
250
251         if (node_type == ICE_TM_NODE_TYPE_QUEUE)
252                 *is_leaf = true;
253         else
254                 *is_leaf = false;
255
256         return 0;
257 }
258
259 static inline struct ice_tm_shaper_profile *
260 ice_shaper_profile_search(struct rte_eth_dev *dev,
261                            uint32_t shaper_profile_id)
262 {
263         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
264         struct ice_shaper_profile_list *shaper_profile_list =
265                 &pf->tm_conf.shaper_profile_list;
266         struct ice_tm_shaper_profile *shaper_profile;
267
268         TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
269                 if (shaper_profile_id == shaper_profile->shaper_profile_id)
270                         return shaper_profile;
271         }
272
273         return NULL;
274 }
275
276 static int
277 ice_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
278                                 struct rte_tm_error *error)
279 {
280         /* min bucket size not supported */
281         if (profile->committed.size) {
282                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
283                 error->message = "committed bucket size not supported";
284                 return -EINVAL;
285         }
286         /* max bucket size not supported */
287         if (profile->peak.size) {
288                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
289                 error->message = "peak bucket size not supported";
290                 return -EINVAL;
291         }
292         /* length adjustment not supported */
293         if (profile->pkt_length_adjust) {
294                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
295                 error->message = "packet length adjustment not supported";
296                 return -EINVAL;
297         }
298
299         return 0;
300 }
301
302 static int
303 ice_shaper_profile_add(struct rte_eth_dev *dev,
304                         uint32_t shaper_profile_id,
305                         struct rte_tm_shaper_params *profile,
306                         struct rte_tm_error *error)
307 {
308         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
309         struct ice_tm_shaper_profile *shaper_profile;
310         int ret;
311
312         if (!profile || !error)
313                 return -EINVAL;
314
315         ret = ice_shaper_profile_param_check(profile, error);
316         if (ret)
317                 return ret;
318
319         shaper_profile = ice_shaper_profile_search(dev, shaper_profile_id);
320
321         if (shaper_profile) {
322                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
323                 error->message = "profile ID exist";
324                 return -EINVAL;
325         }
326
327         shaper_profile = rte_zmalloc("ice_tm_shaper_profile",
328                                      sizeof(struct ice_tm_shaper_profile),
329                                      0);
330         if (!shaper_profile)
331                 return -ENOMEM;
332         shaper_profile->shaper_profile_id = shaper_profile_id;
333         rte_memcpy(&shaper_profile->profile, profile,
334                          sizeof(struct rte_tm_shaper_params));
335         TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
336                           shaper_profile, node);
337
338         return 0;
339 }
340
341 static int
342 ice_shaper_profile_del(struct rte_eth_dev *dev,
343                         uint32_t shaper_profile_id,
344                         struct rte_tm_error *error)
345 {
346         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
347         struct ice_tm_shaper_profile *shaper_profile;
348
349         if (!error)
350                 return -EINVAL;
351
352         shaper_profile = ice_shaper_profile_search(dev, shaper_profile_id);
353
354         if (!shaper_profile) {
355                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
356                 error->message = "profile ID not exist";
357                 return -EINVAL;
358         }
359
360         /* don't delete a profile if it's used by one or several nodes */
361         if (shaper_profile->reference_count) {
362                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
363                 error->message = "profile in use";
364                 return -EINVAL;
365         }
366
367         TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
368         rte_free(shaper_profile);
369
370         return 0;
371 }
372
373 static int
374 ice_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
375               uint32_t parent_node_id, uint32_t priority,
376               uint32_t weight, uint32_t level_id,
377               struct rte_tm_node_params *params,
378               struct rte_tm_error *error)
379 {
380         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
381         enum ice_tm_node_type node_type = ICE_TM_NODE_TYPE_MAX;
382         enum ice_tm_node_type parent_node_type = ICE_TM_NODE_TYPE_MAX;
383         struct ice_tm_shaper_profile *shaper_profile = NULL;
384         struct ice_tm_node *tm_node;
385         struct ice_tm_node *parent_node;
386         uint16_t tc_nb = 1;
387         uint16_t vsi_nb = 1;
388         int ret;
389
390         if (!params || !error)
391                 return -EINVAL;
392
393         /* if already committed */
394         if (pf->tm_conf.committed) {
395                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
396                 error->message = "already committed";
397                 return -EINVAL;
398         }
399
400         ret = ice_node_param_check(pf, node_id, priority, weight,
401                                     params, error);
402         if (ret)
403                 return ret;
404
405         /* check if the node is already existed */
406         if (ice_tm_node_search(dev, node_id, &node_type)) {
407                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
408                 error->message = "node id already used";
409                 return -EINVAL;
410         }
411
412         /* check the shaper profile id */
413         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
414                 shaper_profile = ice_shaper_profile_search(dev,
415                         params->shaper_profile_id);
416                 if (!shaper_profile) {
417                         error->type =
418                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
419                         error->message = "shaper profile not exist";
420                         return -EINVAL;
421                 }
422         }
423
424         /* root node if not have a parent */
425         if (parent_node_id == RTE_TM_NODE_ID_NULL) {
426                 /* check level */
427                 if (level_id != ICE_TM_NODE_TYPE_PORT) {
428                         error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
429                         error->message = "Wrong level";
430                         return -EINVAL;
431                 }
432
433                 /* obviously no more than one root */
434                 if (pf->tm_conf.root) {
435                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
436                         error->message = "already have a root";
437                         return -EINVAL;
438                 }
439
440                 /* add the root node */
441                 tm_node = rte_zmalloc("ice_tm_node",
442                                       sizeof(struct ice_tm_node),
443                                       0);
444                 if (!tm_node)
445                         return -ENOMEM;
446                 tm_node->id = node_id;
447                 tm_node->parent = NULL;
448                 tm_node->reference_count = 0;
449                 tm_node->children = (struct ice_tm_node **)
450                         rte_calloc(NULL, 256, (sizeof(struct ice_tm_node *)), 0);
451                 rte_memcpy(&tm_node->params, params,
452                                  sizeof(struct rte_tm_node_params));
453                 pf->tm_conf.root = tm_node;
454                 return 0;
455         }
456
457         /* TC or queue node */
458         /* check the parent node */
459         parent_node = ice_tm_node_search(dev, parent_node_id,
460                                           &parent_node_type);
461         if (!parent_node) {
462                 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
463                 error->message = "parent not exist";
464                 return -EINVAL;
465         }
466         if (parent_node_type != ICE_TM_NODE_TYPE_PORT &&
467             parent_node_type != ICE_TM_NODE_TYPE_TC &&
468             parent_node_type != ICE_TM_NODE_TYPE_VSI &&
469             parent_node_type != ICE_TM_NODE_TYPE_QGROUP) {
470                 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
471                 error->message = "parent is not valid";
472                 return -EINVAL;
473         }
474         /* check level */
475         if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
476             level_id != (uint32_t)parent_node_type + 1) {
477                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
478                 error->message = "Wrong level";
479                 return -EINVAL;
480         }
481
482         /* check the node number */
483         if (parent_node_type == ICE_TM_NODE_TYPE_PORT) {
484                 /* check the TC number */
485                 if (pf->tm_conf.nb_tc_node >= tc_nb) {
486                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
487                         error->message = "too many TCs";
488                         return -EINVAL;
489                 }
490         } else if (parent_node_type == ICE_TM_NODE_TYPE_TC) {
491                 /* check the VSI number */
492                 if (pf->tm_conf.nb_vsi_node >= vsi_nb) {
493                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
494                         error->message = "too many VSIs";
495                         return -EINVAL;
496                 }
497         } else if (parent_node_type == ICE_TM_NODE_TYPE_VSI) {
498                 /* check the queue group number */
499                 if (parent_node->reference_count >= pf->dev_data->nb_tx_queues) {
500                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
501                         error->message = "too many queue groups";
502                         return -EINVAL;
503                 }
504         } else {
505                 /* check the queue number */
506                 if (parent_node->reference_count >= pf->dev_data->nb_tx_queues) {
507                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
508                         error->message = "too many queues";
509                         return -EINVAL;
510                 }
511                 if (node_id >= pf->dev_data->nb_tx_queues) {
512                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
513                         error->message = "too large queue id";
514                         return -EINVAL;
515                 }
516         }
517
518         /* add the TC or VSI or queue group or queue node */
519         tm_node = rte_zmalloc("ice_tm_node",
520                               sizeof(struct ice_tm_node),
521                               0);
522         if (!tm_node)
523                 return -ENOMEM;
524         tm_node->id = node_id;
525         tm_node->priority = priority;
526         tm_node->weight = weight;
527         tm_node->reference_count = 0;
528         tm_node->parent = parent_node;
529         tm_node->shaper_profile = shaper_profile;
530         tm_node->children = (struct ice_tm_node **)
531                         rte_calloc(NULL, 256, (sizeof(struct ice_tm_node *)), 0);
532         tm_node->parent->children[tm_node->parent->reference_count] = tm_node;
533
534         if (tm_node->priority != 0 && level_id != ICE_TM_NODE_TYPE_QUEUE &&
535             level_id != ICE_TM_NODE_TYPE_QGROUP)
536                 PMD_DRV_LOG(WARNING, "priority != 0 not supported in level %d",
537                             level_id);
538
539         if (tm_node->weight != 1 && level_id != ICE_TM_NODE_TYPE_QUEUE)
540                 PMD_DRV_LOG(WARNING, "weight != 1 not supported in level %d",
541                             level_id);
542
543         rte_memcpy(&tm_node->params, params,
544                          sizeof(struct rte_tm_node_params));
545         if (parent_node_type == ICE_TM_NODE_TYPE_PORT) {
546                 TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list,
547                                   tm_node, node);
548                 tm_node->tc = pf->tm_conf.nb_tc_node;
549                 pf->tm_conf.nb_tc_node++;
550         } else if (parent_node_type == ICE_TM_NODE_TYPE_TC) {
551                 TAILQ_INSERT_TAIL(&pf->tm_conf.vsi_list,
552                                   tm_node, node);
553                 tm_node->tc = parent_node->tc;
554                 pf->tm_conf.nb_vsi_node++;
555         } else if (parent_node_type == ICE_TM_NODE_TYPE_VSI) {
556                 TAILQ_INSERT_TAIL(&pf->tm_conf.qgroup_list,
557                                   tm_node, node);
558                 tm_node->tc = parent_node->parent->tc;
559                 pf->tm_conf.nb_qgroup_node++;
560         } else {
561                 TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list,
562                                   tm_node, node);
563                 tm_node->tc = parent_node->parent->parent->tc;
564                 pf->tm_conf.nb_queue_node++;
565         }
566         tm_node->parent->reference_count++;
567
568         return 0;
569 }
570
571 static int
572 ice_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
573                  struct rte_tm_error *error)
574 {
575         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
576         enum ice_tm_node_type node_type = ICE_TM_NODE_TYPE_MAX;
577         struct ice_tm_node *tm_node;
578
579         if (!error)
580                 return -EINVAL;
581
582         /* if already committed */
583         if (pf->tm_conf.committed) {
584                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
585                 error->message = "already committed";
586                 return -EINVAL;
587         }
588
589         if (node_id == RTE_TM_NODE_ID_NULL) {
590                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
591                 error->message = "invalid node id";
592                 return -EINVAL;
593         }
594
595         /* check if the node id exists */
596         tm_node = ice_tm_node_search(dev, node_id, &node_type);
597         if (!tm_node) {
598                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
599                 error->message = "no such node";
600                 return -EINVAL;
601         }
602
603         /* the node should have no child */
604         if (tm_node->reference_count) {
605                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
606                 error->message =
607                         "cannot delete a node which has children";
608                 return -EINVAL;
609         }
610
611         /* root node */
612         if (node_type == ICE_TM_NODE_TYPE_PORT) {
613                 rte_free(tm_node);
614                 pf->tm_conf.root = NULL;
615                 return 0;
616         }
617
618         /* TC or VSI or queue group or queue node */
619         tm_node->parent->reference_count--;
620         if (node_type == ICE_TM_NODE_TYPE_TC) {
621                 TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
622                 pf->tm_conf.nb_tc_node--;
623         } else if (node_type == ICE_TM_NODE_TYPE_VSI) {
624                 TAILQ_REMOVE(&pf->tm_conf.vsi_list, tm_node, node);
625                 pf->tm_conf.nb_vsi_node--;
626         } else if (node_type == ICE_TM_NODE_TYPE_QGROUP) {
627                 TAILQ_REMOVE(&pf->tm_conf.qgroup_list, tm_node, node);
628                 pf->tm_conf.nb_qgroup_node--;
629         } else {
630                 TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
631                 pf->tm_conf.nb_queue_node--;
632         }
633         rte_free(tm_node);
634
635         return 0;
636 }
637
638 static int ice_move_recfg_lan_txq(struct rte_eth_dev *dev,
639                                   struct ice_sched_node *queue_sched_node,
640                                   struct ice_sched_node *dst_node,
641                                   uint16_t queue_id)
642 {
643         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
644         struct ice_aqc_move_txqs_data *buf;
645         struct ice_sched_node *queue_parent_node;
646         uint8_t txqs_moved;
647         int ret = ICE_SUCCESS;
648         uint16_t buf_size = ice_struct_size(buf, txqs, 1);
649
650         buf = (struct ice_aqc_move_txqs_data *)ice_malloc(hw, sizeof(*buf));
651
652         queue_parent_node = queue_sched_node->parent;
653         buf->src_teid = queue_parent_node->info.node_teid;
654         buf->dest_teid = dst_node->info.node_teid;
655         buf->txqs[0].q_teid = queue_sched_node->info.node_teid;
656         buf->txqs[0].txq_id = queue_id;
657
658         ret = ice_aq_move_recfg_lan_txq(hw, 1, true, false, false, false, 50,
659                                         NULL, buf, buf_size, &txqs_moved, NULL);
660         if (ret || txqs_moved == 0) {
661                 PMD_DRV_LOG(ERR, "move lan queue %u failed", queue_id);
662                 return ICE_ERR_PARAM;
663         }
664
665         if (queue_parent_node->num_children > 0) {
666                 queue_parent_node->num_children--;
667                 queue_parent_node->children[queue_parent_node->num_children] = NULL;
668         } else {
669                 PMD_DRV_LOG(ERR, "invalid children number %d for queue %u",
670                             queue_parent_node->num_children, queue_id);
671                 return ICE_ERR_PARAM;
672         }
673         dst_node->children[dst_node->num_children++] = queue_sched_node;
674         queue_sched_node->parent = dst_node;
675         ice_sched_query_elem(hw, queue_sched_node->info.node_teid, &queue_sched_node->info);
676
677         return ret;
678 }
679
680 static int ice_hierarchy_commit(struct rte_eth_dev *dev,
681                                  int clear_on_fail,
682                                  __rte_unused struct rte_tm_error *error)
683 {
684         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
685         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
686         struct ice_tm_node_list *qgroup_list = &pf->tm_conf.qgroup_list;
687         struct ice_tm_node_list *queue_list = &pf->tm_conf.queue_list;
688         struct ice_tm_node *tm_node;
689         struct ice_sched_node *node;
690         struct ice_sched_node *vsi_node;
691         struct ice_sched_node *queue_node;
692         struct ice_tx_queue *txq;
693         struct ice_vsi *vsi;
694         int ret_val = ICE_SUCCESS;
695         uint64_t peak = 0;
696         uint8_t priority;
697         uint32_t i;
698         uint32_t idx_vsi_child;
699         uint32_t idx_qg;
700         uint32_t nb_vsi_child;
701         uint32_t nb_qg;
702         uint32_t qid;
703         uint32_t q_teid;
704         uint32_t vsi_layer;
705
706         for (i = 0; i < dev->data->nb_tx_queues; i++) {
707                 ret_val = ice_tx_queue_stop(dev, i);
708                 if (ret_val) {
709                         error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
710                         PMD_DRV_LOG(ERR, "stop queue %u failed", i);
711                         goto fail_clear;
712                 }
713         }
714
715         node = hw->port_info->root;
716         vsi_layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
717         for (i = 0; i < vsi_layer; i++)
718                 node = node->children[0];
719         vsi_node = node;
720         nb_vsi_child = vsi_node->num_children;
721         nb_qg = vsi_node->children[0]->num_children;
722
723         idx_vsi_child = 0;
724         idx_qg = 0;
725
726         TAILQ_FOREACH(tm_node, qgroup_list, node) {
727                 struct ice_tm_node *tm_child_node;
728                 struct ice_sched_node *qgroup_sched_node =
729                         vsi_node->children[idx_vsi_child]->children[idx_qg];
730
731                 for (i = 0; i < tm_node->reference_count; i++) {
732                         tm_child_node = tm_node->children[i];
733                         qid = tm_child_node->id;
734                         ret_val = ice_tx_queue_start(dev, qid);
735                         if (ret_val) {
736                                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
737                                 PMD_DRV_LOG(ERR, "start queue %u failed", qid);
738                                 goto fail_clear;
739                         }
740                         txq = dev->data->tx_queues[qid];
741                         q_teid = txq->q_teid;
742                         queue_node = ice_sched_get_node(hw->port_info, q_teid);
743                         if (queue_node == NULL) {
744                                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
745                                 PMD_DRV_LOG(ERR, "get queue %u node failed", qid);
746                                 goto fail_clear;
747                         }
748                         if (queue_node->info.parent_teid == qgroup_sched_node->info.node_teid)
749                                 continue;
750                         ret_val = ice_move_recfg_lan_txq(dev, queue_node, qgroup_sched_node, qid);
751                         if (ret_val) {
752                                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
753                                 PMD_DRV_LOG(ERR, "move queue %u failed", qid);
754                                 goto fail_clear;
755                         }
756                 }
757                 if (tm_node->reference_count != 0 && tm_node->shaper_profile) {
758                         uint32_t node_teid = qgroup_sched_node->info.node_teid;
759                         /* Transfer from Byte per seconds to Kbps */
760                         peak = tm_node->shaper_profile->profile.peak.rate;
761                         peak = peak / 1000 * BITS_PER_BYTE;
762                         ret_val = ice_sched_set_node_bw_lmt_per_tc(hw->port_info,
763                                                                    node_teid,
764                                                                    ICE_AGG_TYPE_Q,
765                                                                    tm_node->tc,
766                                                                    ICE_MAX_BW,
767                                                                    (u32)peak);
768                         if (ret_val) {
769                                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
770                                 PMD_DRV_LOG(ERR,
771                                             "configure queue group %u bandwidth failed",
772                                             tm_node->id);
773                                 goto fail_clear;
774                         }
775                 }
776                 priority = 7 - tm_node->priority;
777                 ret_val = ice_sched_cfg_sibl_node_prio_lock(hw->port_info, qgroup_sched_node,
778                                                             priority);
779                 if (ret_val) {
780                         error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
781                         PMD_DRV_LOG(ERR, "configure queue group %u priority failed",
782                                     tm_node->priority);
783                         goto fail_clear;
784                 }
785                 idx_qg++;
786                 if (idx_qg >= nb_qg) {
787                         idx_qg = 0;
788                         idx_vsi_child++;
789                 }
790                 if (idx_vsi_child >= nb_vsi_child) {
791                         error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
792                         PMD_DRV_LOG(ERR, "too many queues");
793                         goto fail_clear;
794                 }
795         }
796
797         TAILQ_FOREACH(tm_node, queue_list, node) {
798                 qid = tm_node->id;
799                 txq = dev->data->tx_queues[qid];
800                 vsi = txq->vsi;
801                 q_teid = txq->q_teid;
802                 if (tm_node->shaper_profile) {
803                         /* Transfer from Byte per seconds to Kbps */
804                         peak = tm_node->shaper_profile->profile.peak.rate;
805                         peak = peak / 1000 * BITS_PER_BYTE;
806                         ret_val = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx,
807                                                    tm_node->tc, tm_node->id,
808                                                    ICE_MAX_BW, (u32)peak);
809                         if (ret_val) {
810                                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
811                                 PMD_DRV_LOG(ERR,
812                                             "configure queue %u bandwidth failed",
813                                             tm_node->id);
814                                 goto fail_clear;
815                         }
816                 }
817                 priority = 7 - tm_node->priority;
818                 ret_val = ice_cfg_vsi_q_priority(hw->port_info, 1,
819                                                  &q_teid, &priority);
820                 if (ret_val) {
821                         error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
822                         PMD_DRV_LOG(ERR, "configure queue %u priority failed", tm_node->priority);
823                         goto fail_clear;
824                 }
825
826                 ret_val = ice_cfg_q_bw_alloc(hw->port_info, vsi->idx,
827                                              tm_node->tc, tm_node->id,
828                                              ICE_MAX_BW, (u32)tm_node->weight);
829                 if (ret_val) {
830                         error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
831                         PMD_DRV_LOG(ERR, "configure queue %u weight failed", tm_node->weight);
832                         goto fail_clear;
833                 }
834         }
835
836         return ret_val;
837
838 fail_clear:
839         /* clear all the traffic manager configuration */
840         if (clear_on_fail) {
841                 ice_tm_conf_uninit(dev);
842                 ice_tm_conf_init(dev);
843         }
844         return ret_val;
845 }