net/iavf: fix QFI fields of GTPU UL/DL for flow director
[dpdk.git] / drivers / net / iavf / iavf_tm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 #include <rte_tm_driver.h>
5
6 #include "iavf.h"
7
8 static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
9                                  __rte_unused int clear_on_fail,
10                                  __rte_unused struct rte_tm_error *error);
11 static int iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
12               uint32_t parent_node_id, uint32_t priority,
13               uint32_t weight, uint32_t level_id,
14               struct rte_tm_node_params *params,
15               struct rte_tm_error *error);
16 static int iavf_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
17                             struct rte_tm_error *error);
18 static int iavf_tm_capabilities_get(struct rte_eth_dev *dev,
19                          struct rte_tm_capabilities *cap,
20                          struct rte_tm_error *error);
21 static int iavf_level_capabilities_get(struct rte_eth_dev *dev,
22                             uint32_t level_id,
23                             struct rte_tm_level_capabilities *cap,
24                             struct rte_tm_error *error);
25 static int iavf_node_capabilities_get(struct rte_eth_dev *dev,
26                                       uint32_t node_id,
27                                       struct rte_tm_node_capabilities *cap,
28                                       struct rte_tm_error *error);
29 static int iavf_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
30                    int *is_leaf, struct rte_tm_error *error);
31
/* Generic traffic-manager (rte_tm) callbacks exported by the iavf PMD.
 * Only node add/delete, capability queries, node-type query and the
 * final hierarchy commit are implemented; shaper-profile and stats
 * hooks are intentionally absent (left NULL).
 */
const struct rte_tm_ops iavf_tm_ops = {
	.node_add = iavf_tm_node_add,
	.node_delete = iavf_tm_node_delete,
	.capabilities_get = iavf_tm_capabilities_get,
	.level_capabilities_get = iavf_level_capabilities_get,
	.node_capabilities_get = iavf_node_capabilities_get,
	.node_type_get = iavf_node_type_get,
	.hierarchy_commit = iavf_hierarchy_commit,
};
41
42 void
43 iavf_tm_conf_init(struct rte_eth_dev *dev)
44 {
45         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
46
47         /* initialize node configuration */
48         vf->tm_conf.root = NULL;
49         TAILQ_INIT(&vf->tm_conf.tc_list);
50         TAILQ_INIT(&vf->tm_conf.queue_list);
51         vf->tm_conf.nb_tc_node = 0;
52         vf->tm_conf.nb_queue_node = 0;
53         vf->tm_conf.committed = false;
54 }
55
56 void
57 iavf_tm_conf_uninit(struct rte_eth_dev *dev)
58 {
59         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
60         struct iavf_tm_node *tm_node;
61
62         /* clear node configuration */
63         while ((tm_node = TAILQ_FIRST(&vf->tm_conf.queue_list))) {
64                 TAILQ_REMOVE(&vf->tm_conf.queue_list, tm_node, node);
65                 rte_free(tm_node);
66         }
67         vf->tm_conf.nb_queue_node = 0;
68         while ((tm_node = TAILQ_FIRST(&vf->tm_conf.tc_list))) {
69                 TAILQ_REMOVE(&vf->tm_conf.tc_list, tm_node, node);
70                 rte_free(tm_node);
71         }
72         vf->tm_conf.nb_tc_node = 0;
73         if (vf->tm_conf.root) {
74                 rte_free(vf->tm_conf.root);
75                 vf->tm_conf.root = NULL;
76         }
77 }
78
79 static inline struct iavf_tm_node *
80 iavf_tm_node_search(struct rte_eth_dev *dev,
81                     uint32_t node_id, enum iavf_tm_node_type *node_type)
82 {
83         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
84         struct iavf_tm_node_list *tc_list = &vf->tm_conf.tc_list;
85         struct iavf_tm_node_list *queue_list = &vf->tm_conf.queue_list;
86         struct iavf_tm_node *tm_node;
87
88         if (vf->tm_conf.root && vf->tm_conf.root->id == node_id) {
89                 *node_type = IAVF_TM_NODE_TYPE_PORT;
90                 return vf->tm_conf.root;
91         }
92
93         TAILQ_FOREACH(tm_node, tc_list, node) {
94                 if (tm_node->id == node_id) {
95                         *node_type = IAVF_TM_NODE_TYPE_TC;
96                         return tm_node;
97                 }
98         }
99
100         TAILQ_FOREACH(tm_node, queue_list, node) {
101                 if (tm_node->id == node_id) {
102                         *node_type = IAVF_TM_NODE_TYPE_QUEUE;
103                         return tm_node;
104                 }
105         }
106
107         return NULL;
108 }
109
110 static int
111 iavf_node_param_check(struct iavf_info *vf, uint32_t node_id,
112                       uint32_t priority, uint32_t weight,
113                       struct rte_tm_node_params *params,
114                       struct rte_tm_error *error)
115 {
116         /* checked all the unsupported parameter */
117         if (node_id == RTE_TM_NODE_ID_NULL) {
118                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
119                 error->message = "invalid node id";
120                 return -EINVAL;
121         }
122
123         if (priority) {
124                 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
125                 error->message = "priority should be 0";
126                 return -EINVAL;
127         }
128
129         if (weight != 1) {
130                 error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
131                 error->message = "weight must be 1";
132                 return -EINVAL;
133         }
134
135         /* not support shaper profile */
136         if (params->shaper_profile_id) {
137                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
138                 error->message = "shaper profile not supported";
139                 return -EINVAL;
140         }
141
142         /* not support shared shaper */
143         if (params->shared_shaper_id) {
144                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
145                 error->message = "shared shaper not supported";
146                 return -EINVAL;
147         }
148         if (params->n_shared_shapers) {
149                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
150                 error->message = "shared shaper not supported";
151                 return -EINVAL;
152         }
153
154         /* for non-leaf node */
155         if (node_id >= vf->num_queue_pairs) {
156                 if (params->nonleaf.wfq_weight_mode) {
157                         error->type =
158                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
159                         error->message = "WFQ not supported";
160                         return -EINVAL;
161                 }
162                 if (params->nonleaf.n_sp_priorities != 1) {
163                         error->type =
164                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
165                         error->message = "SP priority not supported";
166                         return -EINVAL;
167                 } else if (params->nonleaf.wfq_weight_mode &&
168                            !(*params->nonleaf.wfq_weight_mode)) {
169                         error->type =
170                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
171                         error->message = "WFP should be byte mode";
172                         return -EINVAL;
173                 }
174
175                 return 0;
176         }
177
178         /* for leaf node */
179         if (params->leaf.cman) {
180                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
181                 error->message = "Congestion management not supported";
182                 return -EINVAL;
183         }
184         if (params->leaf.wred.wred_profile_id !=
185             RTE_TM_WRED_PROFILE_ID_NONE) {
186                 error->type =
187                         RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
188                 error->message = "WRED not supported";
189                 return -EINVAL;
190         }
191         if (params->leaf.wred.shared_wred_context_id) {
192                 error->type =
193                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
194                 error->message = "WRED not supported";
195                 return -EINVAL;
196         }
197         if (params->leaf.wred.n_shared_wred_contexts) {
198                 error->type =
199                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
200                 error->message = "WRED not supported";
201                 return -EINVAL;
202         }
203
204         return 0;
205 }
206
207 static int
208 iavf_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
209                    int *is_leaf, struct rte_tm_error *error)
210 {
211         enum iavf_tm_node_type node_type = IAVF_TM_NODE_TYPE_MAX;
212         struct iavf_tm_node *tm_node;
213
214         if (!is_leaf || !error)
215                 return -EINVAL;
216
217         if (node_id == RTE_TM_NODE_ID_NULL) {
218                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
219                 error->message = "invalid node id";
220                 return -EINVAL;
221         }
222
223         /* check if the node id exists */
224         tm_node = iavf_tm_node_search(dev, node_id, &node_type);
225         if (!tm_node) {
226                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
227                 error->message = "no such node";
228                 return -EINVAL;
229         }
230
231         if (node_type == IAVF_TM_NODE_TYPE_QUEUE)
232                 *is_leaf = true;
233         else
234                 *is_leaf = false;
235
236         return 0;
237 }
238
/*
 * rte_tm node_add callback.
 *
 * Builds the three-level port/TC/queue hierarchy in software only;
 * nothing is sent to the PF until hierarchy_commit.  A node with
 * parent_node_id == RTE_TM_NODE_ID_NULL becomes the single root
 * (port) node; children of the root are TC nodes; children of a TC
 * are queue nodes, whose node_id doubles as the queue index.
 *
 * Returns 0 on success, -EINVAL/-ENOMEM on failure with @error set.
 */
static int
iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	      uint32_t parent_node_id, uint32_t priority,
	      uint32_t weight, uint32_t level_id,
	      struct rte_tm_node_params *params,
	      struct rte_tm_error *error)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	enum iavf_tm_node_type node_type = IAVF_TM_NODE_TYPE_MAX;
	enum iavf_tm_node_type parent_node_type = IAVF_TM_NODE_TYPE_MAX;
	struct iavf_tm_node *tm_node;
	struct iavf_tm_node *parent_node;
	/* number of TCs advertised by the PF via VIRTCHNL QoS caps */
	uint16_t tc_nb = vf->qos_cap->num_elem;
	int ret;

	if (!params || !error)
		return -EINVAL;

	/* if already committed */
	if (vf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	ret = iavf_node_param_check(vf, node_id, priority, weight,
				    params, error);
	if (ret)
		return ret;

	/* check if the node is already existed */
	if (iavf_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	/* root node if not have a parent */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check level */
		if (level_id != IAVF_TM_NODE_TYPE_PORT) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "Wrong level";
			return -EINVAL;
		}

		/* obviously no more than one root */
		if (vf->tm_conf.root) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "already have a root";
			return -EINVAL;
		}

		/* add the root node */
		tm_node = rte_zmalloc("iavf_tm_node",
				      sizeof(struct iavf_tm_node),
				      0);
		if (!tm_node)
			return -ENOMEM;
		tm_node->id = node_id;
		tm_node->parent = NULL;
		tm_node->reference_count = 0;
		rte_memcpy(&tm_node->params, params,
				 sizeof(struct rte_tm_node_params));
		vf->tm_conf.root = tm_node;
		return 0;
	}

	/* TC or queue node */
	/* check the parent node */
	parent_node = iavf_tm_node_search(dev, parent_node_id,
					  &parent_node_type);
	if (!parent_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent not exist";
		return -EINVAL;
	}
	if (parent_node_type != IAVF_TM_NODE_TYPE_PORT &&
	    parent_node_type != IAVF_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not root or TC";
		return -EINVAL;
	}
	/* check level: relies on the node-type enum being ordered
	 * port < TC < queue, so a child's level is parent's level + 1
	 */
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != parent_node_type + 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "Wrong level";
		return -EINVAL;
	}

	/* check the node number */
	if (parent_node_type == IAVF_TM_NODE_TYPE_PORT) {
		/* check the TC number */
		if (vf->tm_conf.nb_tc_node >= tc_nb) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many TCs";
			return -EINVAL;
		}
	} else {
		/* check the queue number */
		if (parent_node->reference_count >= vf->num_queue_pairs) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues";
			return -EINVAL;
		}
		/* queue node ids map directly onto queue indices */
		if (node_id >= vf->num_queue_pairs) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too large queue id";
			return -EINVAL;
		}
	}

	/* add the TC or queue node */
	tm_node = rte_zmalloc("iavf_tm_node",
			      sizeof(struct iavf_tm_node),
			      0);
	if (!tm_node)
		return -ENOMEM;
	tm_node->id = node_id;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	rte_memcpy(&tm_node->params, params,
			 sizeof(struct rte_tm_node_params));
	if (parent_node_type == IAVF_TM_NODE_TYPE_PORT) {
		TAILQ_INSERT_TAIL(&vf->tm_conf.tc_list,
				  tm_node, node);
		/* TCs are numbered in insertion order */
		tm_node->tc = vf->tm_conf.nb_tc_node;
		vf->tm_conf.nb_tc_node++;
	} else {
		TAILQ_INSERT_TAIL(&vf->tm_conf.queue_list,
				  tm_node, node);
		/* a queue inherits the TC of its parent node */
		tm_node->tc = parent_node->tc;
		vf->tm_conf.nb_queue_node++;
	}
	tm_node->parent->reference_count++;

	return 0;
}
378
379 static int
380 iavf_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
381                  struct rte_tm_error *error)
382 {
383         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
384         enum iavf_tm_node_type node_type = IAVF_TM_NODE_TYPE_MAX;
385         struct iavf_tm_node *tm_node;
386
387         if (!error)
388                 return -EINVAL;
389
390         /* if already committed */
391         if (vf->tm_conf.committed) {
392                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
393                 error->message = "already committed";
394                 return -EINVAL;
395         }
396
397         if (node_id == RTE_TM_NODE_ID_NULL) {
398                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
399                 error->message = "invalid node id";
400                 return -EINVAL;
401         }
402
403         /* check if the node id exists */
404         tm_node = iavf_tm_node_search(dev, node_id, &node_type);
405         if (!tm_node) {
406                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
407                 error->message = "no such node";
408                 return -EINVAL;
409         }
410
411         /* the node should have no child */
412         if (tm_node->reference_count) {
413                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
414                 error->message =
415                         "cannot delete a node which has children";
416                 return -EINVAL;
417         }
418
419         /* root node */
420         if (node_type == IAVF_TM_NODE_TYPE_PORT) {
421                 rte_free(tm_node);
422                 vf->tm_conf.root = NULL;
423                 return 0;
424         }
425
426         /* TC or queue node */
427         tm_node->parent->reference_count--;
428         if (node_type == IAVF_TM_NODE_TYPE_TC) {
429                 TAILQ_REMOVE(&vf->tm_conf.tc_list, tm_node, node);
430                 vf->tm_conf.nb_tc_node--;
431         } else {
432                 TAILQ_REMOVE(&vf->tm_conf.queue_list, tm_node, node);
433                 vf->tm_conf.nb_queue_node--;
434         }
435         rte_free(tm_node);
436
437         return 0;
438 }
439
440 static int
441 iavf_tm_capabilities_get(struct rte_eth_dev *dev,
442                          struct rte_tm_capabilities *cap,
443                          struct rte_tm_error *error)
444 {
445         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
446         uint16_t tc_nb = vf->qos_cap->num_elem;
447
448         if (!cap || !error)
449                 return -EINVAL;
450
451         if (tc_nb > vf->vf_res->num_queue_pairs)
452                 return -EINVAL;
453
454         error->type = RTE_TM_ERROR_TYPE_NONE;
455
456         /* set all the parameters to 0 first. */
457         memset(cap, 0, sizeof(struct rte_tm_capabilities));
458
459         /**
460          * support port + TCs + queues
461          * here shows the max capability not the current configuration.
462          */
463         cap->n_nodes_max = 1 + IAVF_MAX_TRAFFIC_CLASS
464                 + vf->num_queue_pairs;
465         cap->n_levels_max = 3; /* port, TC, queue */
466         cap->non_leaf_nodes_identical = 1;
467         cap->leaf_nodes_identical = 1;
468         cap->shaper_n_max = cap->n_nodes_max;
469         cap->shaper_private_n_max = cap->n_nodes_max;
470         cap->shaper_private_dual_rate_n_max = 0;
471         cap->shaper_private_rate_min = 0;
472         /* Bytes per second */
473         cap->shaper_private_rate_max =
474                 (uint64_t)vf->link_speed * 1000000 / IAVF_BITS_PER_BYTE;
475         cap->shaper_private_packet_mode_supported = 0;
476         cap->shaper_private_byte_mode_supported = 1;
477         cap->shaper_shared_n_max = 0;
478         cap->shaper_shared_n_nodes_per_shaper_max = 0;
479         cap->shaper_shared_n_shapers_per_node_max = 0;
480         cap->shaper_shared_dual_rate_n_max = 0;
481         cap->shaper_shared_rate_min = 0;
482         cap->shaper_shared_rate_max = 0;
483         cap->shaper_shared_packet_mode_supported = 0;
484         cap->shaper_shared_byte_mode_supported = 0;
485         cap->sched_n_children_max = vf->num_queue_pairs;
486         cap->sched_sp_n_priorities_max = 1;
487         cap->sched_wfq_n_children_per_group_max = 0;
488         cap->sched_wfq_n_groups_max = 0;
489         cap->sched_wfq_weight_max = 1;
490         cap->sched_wfq_packet_mode_supported = 0;
491         cap->sched_wfq_byte_mode_supported = 0;
492         cap->cman_head_drop_supported = 0;
493         cap->dynamic_update_mask = 0;
494         cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
495         cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
496         cap->cman_wred_context_n_max = 0;
497         cap->cman_wred_context_private_n_max = 0;
498         cap->cman_wred_context_shared_n_max = 0;
499         cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
500         cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
501         cap->stats_mask = 0;
502
503         return 0;
504 }
505
506 static int
507 iavf_level_capabilities_get(struct rte_eth_dev *dev,
508                             uint32_t level_id,
509                             struct rte_tm_level_capabilities *cap,
510                             struct rte_tm_error *error)
511 {
512         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
513
514         if (!cap || !error)
515                 return -EINVAL;
516
517         if (level_id >= IAVF_TM_NODE_TYPE_MAX) {
518                 error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
519                 error->message = "too deep level";
520                 return -EINVAL;
521         }
522
523         /* root node */
524         if (level_id == IAVF_TM_NODE_TYPE_PORT) {
525                 cap->n_nodes_max = 1;
526                 cap->n_nodes_nonleaf_max = 1;
527                 cap->n_nodes_leaf_max = 0;
528         } else if (level_id == IAVF_TM_NODE_TYPE_TC) {
529                 /* TC */
530                 cap->n_nodes_max = IAVF_MAX_TRAFFIC_CLASS;
531                 cap->n_nodes_nonleaf_max = IAVF_MAX_TRAFFIC_CLASS;
532                 cap->n_nodes_leaf_max = 0;
533         } else {
534                 /* queue */
535                 cap->n_nodes_max = vf->num_queue_pairs;
536                 cap->n_nodes_nonleaf_max = 0;
537                 cap->n_nodes_leaf_max = vf->num_queue_pairs;
538         }
539
540         cap->non_leaf_nodes_identical = true;
541         cap->leaf_nodes_identical = true;
542
543         if (level_id != IAVF_TM_NODE_TYPE_QUEUE) {
544                 cap->nonleaf.shaper_private_supported = true;
545                 cap->nonleaf.shaper_private_dual_rate_supported = false;
546                 cap->nonleaf.shaper_private_rate_min = 0;
547                 /* Bytes per second */
548                 cap->nonleaf.shaper_private_rate_max =
549                         (uint64_t)vf->link_speed * 1000000 / IAVF_BITS_PER_BYTE;
550                 cap->nonleaf.shaper_private_packet_mode_supported = 0;
551                 cap->nonleaf.shaper_private_byte_mode_supported = 1;
552                 cap->nonleaf.shaper_shared_n_max = 0;
553                 cap->nonleaf.shaper_shared_packet_mode_supported = 0;
554                 cap->nonleaf.shaper_shared_byte_mode_supported = 0;
555                 if (level_id == IAVF_TM_NODE_TYPE_PORT)
556                         cap->nonleaf.sched_n_children_max =
557                                 IAVF_MAX_TRAFFIC_CLASS;
558                 else
559                         cap->nonleaf.sched_n_children_max =
560                                 vf->num_queue_pairs;
561                 cap->nonleaf.sched_sp_n_priorities_max = 1;
562                 cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
563                 cap->nonleaf.sched_wfq_n_groups_max = 0;
564                 cap->nonleaf.sched_wfq_weight_max = 1;
565                 cap->nonleaf.sched_wfq_packet_mode_supported = 0;
566                 cap->nonleaf.sched_wfq_byte_mode_supported = 0;
567                 cap->nonleaf.stats_mask = 0;
568
569                 return 0;
570         }
571
572         /* queue node */
573         cap->leaf.shaper_private_supported = false;
574         cap->leaf.shaper_private_dual_rate_supported = false;
575         cap->leaf.shaper_private_rate_min = 0;
576         /* Bytes per second */
577         cap->leaf.shaper_private_rate_max =
578                 (uint64_t)vf->link_speed * 1000000 / IAVF_BITS_PER_BYTE;
579         cap->leaf.shaper_private_packet_mode_supported = 0;
580         cap->leaf.shaper_private_byte_mode_supported = 1;
581         cap->leaf.shaper_shared_n_max = 0;
582         cap->leaf.shaper_shared_packet_mode_supported = 0;
583         cap->leaf.shaper_shared_byte_mode_supported = 0;
584         cap->leaf.cman_head_drop_supported = false;
585         cap->leaf.cman_wred_context_private_supported = true;
586         cap->leaf.cman_wred_context_shared_n_max = 0;
587         cap->leaf.stats_mask = 0;
588
589         return 0;
590 }
591
592 static int
593 iavf_node_capabilities_get(struct rte_eth_dev *dev,
594                            uint32_t node_id,
595                            struct rte_tm_node_capabilities *cap,
596                            struct rte_tm_error *error)
597 {
598         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
599         enum iavf_tm_node_type node_type;
600         struct virtchnl_qos_cap_elem tc_cap;
601         struct iavf_tm_node *tm_node;
602
603         if (!cap || !error)
604                 return -EINVAL;
605
606         if (node_id == RTE_TM_NODE_ID_NULL) {
607                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
608                 error->message = "invalid node id";
609                 return -EINVAL;
610         }
611
612         /* check if the node id exists */
613         tm_node = iavf_tm_node_search(dev, node_id, &node_type);
614         if (!tm_node) {
615                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
616                 error->message = "no such node";
617                 return -EINVAL;
618         }
619
620         if (node_type != IAVF_TM_NODE_TYPE_TC) {
621                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
622                 error->message = "not support capability get";
623                 return -EINVAL;
624         }
625
626         tc_cap = vf->qos_cap->cap[tm_node->tc];
627         if (tc_cap.tc_num != tm_node->tc) {
628                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
629                 error->message = "tc not match";
630                 return -EINVAL;
631         }
632
633         cap->shaper_private_supported = true;
634         cap->shaper_private_dual_rate_supported = false;
635         /* Bytes per second */
636         cap->shaper_private_rate_min =
637                 (uint64_t)tc_cap.shaper.committed * 1000 / IAVF_BITS_PER_BYTE;
638         cap->shaper_private_rate_max =
639                 (uint64_t)tc_cap.shaper.peak * 1000 / IAVF_BITS_PER_BYTE;
640         cap->shaper_shared_n_max = 0;
641         cap->nonleaf.sched_n_children_max = vf->num_queue_pairs;
642         cap->nonleaf.sched_sp_n_priorities_max = 1;
643         cap->nonleaf.sched_wfq_n_children_per_group_max = 1;
644         cap->nonleaf.sched_wfq_n_groups_max = 0;
645         cap->nonleaf.sched_wfq_weight_max = tc_cap.weight;
646         cap->stats_mask = 0;
647
648         return 0;
649 }
650
651 static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
652                                  int clear_on_fail,
653                                  __rte_unused struct rte_tm_error *error)
654 {
655         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
656         struct iavf_adapter *adapter =
657                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
658         struct virtchnl_queue_tc_mapping *q_tc_mapping;
659         struct iavf_tm_node_list *queue_list = &vf->tm_conf.queue_list;
660         struct iavf_tm_node *tm_node;
661         struct iavf_qtc_map *qtc_map;
662         uint16_t size;
663         int index = 0, node_committed = 0;
664         int i, ret_val = IAVF_SUCCESS;
665
666         /* check if port is stopped */
667         if (adapter->stopped != 1) {
668                 PMD_DRV_LOG(ERR, "Please stop port first");
669                 ret_val = IAVF_ERR_NOT_READY;
670                 goto err;
671         }
672
673         if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)) {
674                 PMD_DRV_LOG(ERR, "VF queue tc mapping is not supported");
675                 ret_val = IAVF_NOT_SUPPORTED;
676                 goto fail_clear;
677         }
678
679         /* check if all TC nodes are set with VF vsi */
680         if (vf->tm_conf.nb_tc_node != vf->qos_cap->num_elem) {
681                 PMD_DRV_LOG(ERR, "Does not set VF vsi nodes to all TCs");
682                 ret_val = IAVF_ERR_PARAM;
683                 goto fail_clear;
684         }
685
686         size = sizeof(*q_tc_mapping) + sizeof(q_tc_mapping->tc[0]) *
687                 (vf->qos_cap->num_elem - 1);
688         q_tc_mapping = rte_zmalloc("q_tc", size, 0);
689         if (!q_tc_mapping) {
690                 ret_val = IAVF_ERR_NO_MEMORY;
691                 goto fail_clear;
692         }
693
694         q_tc_mapping->vsi_id = vf->vsi.vsi_id;
695         q_tc_mapping->num_tc = vf->qos_cap->num_elem;
696         q_tc_mapping->num_queue_pairs = vf->num_queue_pairs;
697
698         TAILQ_FOREACH(tm_node, queue_list, node) {
699                 if (tm_node->tc >= q_tc_mapping->num_tc) {
700                         PMD_DRV_LOG(ERR, "TC%d is not enabled", tm_node->tc);
701                         ret_val = IAVF_ERR_PARAM;
702                         goto fail_clear;
703                 }
704                 q_tc_mapping->tc[tm_node->tc].req.queue_count++;
705                 node_committed++;
706         }
707
708         /* All queues allocated to this VF should be mapped */
709         if (node_committed < vf->num_queue_pairs) {
710                 PMD_DRV_LOG(ERR, "queue node is less than allocated queue pairs");
711                 ret_val = IAVF_ERR_PARAM;
712                 goto fail_clear;
713         }
714
715         /* store the queue TC mapping info */
716         qtc_map = rte_zmalloc("qtc_map",
717                 sizeof(struct iavf_qtc_map) * q_tc_mapping->num_tc, 0);
718         if (!qtc_map)
719                 return IAVF_ERR_NO_MEMORY;
720
721         for (i = 0; i < q_tc_mapping->num_tc; i++) {
722                 q_tc_mapping->tc[i].req.start_queue_id = index;
723                 index += q_tc_mapping->tc[i].req.queue_count;
724                 qtc_map[i].tc = i;
725                 qtc_map[i].start_queue_id =
726                         q_tc_mapping->tc[i].req.start_queue_id;
727                 qtc_map[i].queue_count = q_tc_mapping->tc[i].req.queue_count;
728         }
729
730         ret_val = iavf_set_q_tc_map(dev, q_tc_mapping, size);
731         if (ret_val)
732                 goto fail_clear;
733
734         vf->qtc_map = qtc_map;
735         vf->tm_conf.committed = true;
736         return ret_val;
737
738 fail_clear:
739         /* clear all the traffic manager configuration */
740         if (clear_on_fail) {
741                 iavf_tm_conf_uninit(dev);
742                 iavf_tm_conf_init(dev);
743         }
744 err:
745         return ret_val;
746 }