/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <rte_tm_driver.h>

#include "iavf.h"

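/*
 * Traffic Management (rte_tm) support for the iavf PMD.
 *
 * The driver exposes a three-level scheduling hierarchy:
 *
 *	port (root)
 *	  +-- TC 0 .. TC n-1		(traffic classes reported in vf->qos_cap)
 *	        +-- queue 0 .. queue m-1	(one leaf per Tx queue)
 *
 * Nodes and shaper profiles added through the rte_tm API are only cached
 * in software; nothing is sent to the PF until hierarchy_commit translates
 * the cached configuration into virtchnl messages.
 */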
static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
                                 int clear_on_fail,
                                 __rte_unused struct rte_tm_error *error);
static int iavf_shaper_profile_add(struct rte_eth_dev *dev,
                                   uint32_t shaper_profile_id,
                                   struct rte_tm_shaper_params *profile,
                                   struct rte_tm_error *error);
static int iavf_shaper_profile_del(struct rte_eth_dev *dev,
                                   uint32_t shaper_profile_id,
                                   struct rte_tm_error *error);
static int iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
              uint32_t parent_node_id, uint32_t priority,
              uint32_t weight, uint32_t level_id,
              struct rte_tm_node_params *params,
              struct rte_tm_error *error);
static int iavf_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
                            struct rte_tm_error *error);
static int iavf_tm_capabilities_get(struct rte_eth_dev *dev,
                         struct rte_tm_capabilities *cap,
                         struct rte_tm_error *error);
static int iavf_level_capabilities_get(struct rte_eth_dev *dev,
                            uint32_t level_id,
                            struct rte_tm_level_capabilities *cap,
                            struct rte_tm_error *error);
static int iavf_node_capabilities_get(struct rte_eth_dev *dev,
                                      uint32_t node_id,
                                      struct rte_tm_node_capabilities *cap,
                                      struct rte_tm_error *error);
static int iavf_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
                   int *is_leaf, struct rte_tm_error *error);

const struct rte_tm_ops iavf_tm_ops = {
        .shaper_profile_add = iavf_shaper_profile_add,
        .shaper_profile_delete = iavf_shaper_profile_del,
        .node_add = iavf_tm_node_add,
        .node_delete = iavf_tm_node_delete,
        .capabilities_get = iavf_tm_capabilities_get,
        .level_capabilities_get = iavf_level_capabilities_get,
        .node_capabilities_get = iavf_node_capabilities_get,
        .node_type_get = iavf_node_type_get,
        .hierarchy_commit = iavf_hierarchy_commit,
};

void
iavf_tm_conf_init(struct rte_eth_dev *dev)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

        /* initialize shaper profile list */
        TAILQ_INIT(&vf->tm_conf.shaper_profile_list);

        /* initialize node configuration */
        vf->tm_conf.root = NULL;
        TAILQ_INIT(&vf->tm_conf.tc_list);
        TAILQ_INIT(&vf->tm_conf.queue_list);
        vf->tm_conf.nb_tc_node = 0;
        vf->tm_conf.nb_queue_node = 0;
        vf->tm_conf.committed = false;
}

void
iavf_tm_conf_uninit(struct rte_eth_dev *dev)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct iavf_tm_shaper_profile *shaper_profile;
        struct iavf_tm_node *tm_node;

        /* clear node configuration */
        while ((tm_node = TAILQ_FIRST(&vf->tm_conf.queue_list))) {
                TAILQ_REMOVE(&vf->tm_conf.queue_list, tm_node, node);
                rte_free(tm_node);
        }
        vf->tm_conf.nb_queue_node = 0;
        while ((tm_node = TAILQ_FIRST(&vf->tm_conf.tc_list))) {
                TAILQ_REMOVE(&vf->tm_conf.tc_list, tm_node, node);
                rte_free(tm_node);
        }
        vf->tm_conf.nb_tc_node = 0;
        if (vf->tm_conf.root) {
                rte_free(vf->tm_conf.root);
                vf->tm_conf.root = NULL;
        }

        /* Remove all shaper profiles */
        while ((shaper_profile =
               TAILQ_FIRST(&vf->tm_conf.shaper_profile_list))) {
                TAILQ_REMOVE(&vf->tm_conf.shaper_profile_list,
                             shaper_profile, node);
                rte_free(shaper_profile);
        }
}

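/* Look up a cached node by id, checking the root first, then the TC list,
 * then the queue list. On a hit, *node_type reports which level matched.
 */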
static inline struct iavf_tm_node *
iavf_tm_node_search(struct rte_eth_dev *dev,
                    uint32_t node_id, enum iavf_tm_node_type *node_type)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct iavf_tm_node_list *tc_list = &vf->tm_conf.tc_list;
        struct iavf_tm_node_list *queue_list = &vf->tm_conf.queue_list;
        struct iavf_tm_node *tm_node;

        if (vf->tm_conf.root && vf->tm_conf.root->id == node_id) {
                *node_type = IAVF_TM_NODE_TYPE_PORT;
                return vf->tm_conf.root;
        }

        TAILQ_FOREACH(tm_node, tc_list, node) {
                if (tm_node->id == node_id) {
                        *node_type = IAVF_TM_NODE_TYPE_TC;
                        return tm_node;
                }
        }

        TAILQ_FOREACH(tm_node, queue_list, node) {
                if (tm_node->id == node_id) {
                        *node_type = IAVF_TM_NODE_TYPE_QUEUE;
                        return tm_node;
                }
        }

        return NULL;
}

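/* Validate the rte_tm node parameters against what this driver supports:
 * strict priority and WFQ are not configurable (priority must be 0, weight
 * must be 1), and shared shapers, congestion management and WRED are all
 * rejected. Node ids below num_queue_pairs are treated as leaf (queue)
 * nodes, everything above as non-leaf.
 */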
static int
iavf_node_param_check(struct iavf_info *vf, uint32_t node_id,
                      uint32_t priority, uint32_t weight,
                      struct rte_tm_node_params *params,
                      struct rte_tm_error *error)
{
        /* check all unsupported parameters */
        if (node_id == RTE_TM_NODE_ID_NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid node id";
                return -EINVAL;
        }

        if (priority) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
                error->message = "priority should be 0";
                return -EINVAL;
        }

        if (weight != 1) {
                error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
                error->message = "weight must be 1";
                return -EINVAL;
        }

        /* shared shapers are not supported */
        if (params->shared_shaper_id) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
                error->message = "shared shaper not supported";
                return -EINVAL;
        }
        if (params->n_shared_shapers) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
                error->message = "shared shaper not supported";
                return -EINVAL;
        }

        /* for non-leaf node */
        if (node_id >= vf->num_queue_pairs) {
                if (params->nonleaf.wfq_weight_mode) {
                        error->type =
                                RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
                        error->message = "WFQ not supported";
                        return -EINVAL;
                }
                if (params->nonleaf.n_sp_priorities != 1) {
                        error->type =
                                RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
                        error->message = "SP priority not supported";
                        return -EINVAL;
                }

                return 0;
        }

        /* for leaf node */
        if (params->leaf.cman) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
                error->message = "Congestion management not supported";
                return -EINVAL;
        }
        if (params->leaf.wred.wred_profile_id !=
            RTE_TM_WRED_PROFILE_ID_NONE) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
                error->message = "WRED not supported";
                return -EINVAL;
        }
        if (params->leaf.wred.shared_wred_context_id) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
                error->message = "WRED not supported";
                return -EINVAL;
        }
        if (params->leaf.wred.n_shared_wred_contexts) {
                error->type =
                        RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
                error->message = "WRED not supported";
                return -EINVAL;
        }

        return 0;
}

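/* rte_tm callback: only queue-level nodes are leaves in this hierarchy. */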
static int
iavf_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
                   int *is_leaf, struct rte_tm_error *error)
{
        enum iavf_tm_node_type node_type = IAVF_TM_NODE_TYPE_MAX;
        struct iavf_tm_node *tm_node;

        if (!is_leaf || !error)
                return -EINVAL;

        if (node_id == RTE_TM_NODE_ID_NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid node id";
                return -EINVAL;
        }

        /* check if the node id exists */
        tm_node = iavf_tm_node_search(dev, node_id, &node_type);
        if (!tm_node) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        if (node_type == IAVF_TM_NODE_TYPE_QUEUE)
                *is_leaf = true;
        else
                *is_leaf = false;

        return 0;
}

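/* Linear scan of the cached shaper profile list by profile id. */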
static inline struct iavf_tm_shaper_profile *
iavf_shaper_profile_search(struct rte_eth_dev *dev,
                           uint32_t shaper_profile_id)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct iavf_shaper_profile_list *shaper_profile_list =
                &vf->tm_conf.shaper_profile_list;
        struct iavf_tm_shaper_profile *shaper_profile;

        TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
                if (shaper_profile_id == shaper_profile->shaper_profile_id)
                        return shaper_profile;
        }

        return NULL;
}

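/* rte_tm callback: cache a node at the port, TC or queue level. The level
 * follows from the parent: no parent means root/port, a root parent means
 * TC, a TC parent means queue. Nodes can only be added before the
 * hierarchy has been committed.
 *
 * A minimal sketch of the call sequence an application might use to build
 * the hierarchy through the generic rte_tm API. The ids and the rate are
 * illustrative only, the non-leaf ids are chosen above the queue-id range,
 * and the sketch assumes the PF reports a single TC and the VF owns a
 * single queue pair:
 *
 *	struct rte_tm_error err = {0};
 *	struct rte_tm_shaper_params sp = {
 *		.peak = { .rate = 125000000 },	// 1 Gbps, in bytes/sec
 *	};
 *	struct rte_tm_node_params np_nonleaf = {
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *		.nonleaf = { .n_sp_priorities = 1 },
 *	};
 *	struct rte_tm_node_params np_leaf = {
 *		.shaper_profile_id = 1,
 *		.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE,
 *	};
 *
 *	rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
 *	// root node: no parent, level 0 (port)
 *	rte_tm_node_add(port_id, 1000, RTE_TM_NODE_ID_NULL, 0, 1, 0,
 *			&np_nonleaf, &err);
 *	// one TC under the root, level 1
 *	rte_tm_node_add(port_id, 900, 1000, 0, 1, 1, &np_nonleaf, &err);
 *	// queue 0 under the TC, level 2, with the shaper profile
 *	rte_tm_node_add(port_id, 0, 900, 0, 1, 2, &np_leaf, &err);
 *	rte_tm_hierarchy_commit(port_id, 1, &err);
 */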
static int
iavf_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
              uint32_t parent_node_id, uint32_t priority,
              uint32_t weight, uint32_t level_id,
              struct rte_tm_node_params *params,
              struct rte_tm_error *error)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        enum iavf_tm_node_type node_type = IAVF_TM_NODE_TYPE_MAX;
        enum iavf_tm_node_type parent_node_type = IAVF_TM_NODE_TYPE_MAX;
        struct iavf_tm_shaper_profile *shaper_profile = NULL;
        struct iavf_tm_node *tm_node;
        struct iavf_tm_node *parent_node;
        uint16_t tc_nb = vf->qos_cap->num_elem;
        int ret;

        if (!params || !error)
                return -EINVAL;

        /* if already committed */
        if (vf->tm_conf.committed) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "already committed";
                return -EINVAL;
        }

        ret = iavf_node_param_check(vf, node_id, priority, weight,
                                    params, error);
        if (ret)
                return ret;

        /* check if the node id is already in use */
        if (iavf_tm_node_search(dev, node_id, &node_type)) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "node id already used";
                return -EINVAL;
        }

        /* check the shaper profile id */
        if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
                shaper_profile = iavf_shaper_profile_search(dev,
                        params->shaper_profile_id);
                if (!shaper_profile) {
                        error->type =
                                RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
                        error->message = "shaper profile does not exist";
                        return -EINVAL;
                }
        }

        /* a node without a parent is the root node */
        if (parent_node_id == RTE_TM_NODE_ID_NULL) {
                /* check level */
                if (level_id != IAVF_TM_NODE_TYPE_PORT) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                        error->message = "Wrong level";
                        return -EINVAL;
                }

                /* obviously no more than one root */
                if (vf->tm_conf.root) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
                        error->message = "already have a root";
                        return -EINVAL;
                }

                /* add the root node */
                tm_node = rte_zmalloc("iavf_tm_node",
                                      sizeof(struct iavf_tm_node),
                                      0);
                if (!tm_node)
                        return -ENOMEM;
                tm_node->id = node_id;
                tm_node->parent = NULL;
                tm_node->reference_count = 0;
                rte_memcpy(&tm_node->params, params,
                                 sizeof(struct rte_tm_node_params));
                vf->tm_conf.root = tm_node;
                return 0;
        }

        /* TC or queue node */
        /* check the parent node */
        parent_node = iavf_tm_node_search(dev, parent_node_id,
                                          &parent_node_type);
        if (!parent_node) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
                error->message = "parent does not exist";
                return -EINVAL;
        }
        if (parent_node_type != IAVF_TM_NODE_TYPE_PORT &&
            parent_node_type != IAVF_TM_NODE_TYPE_TC) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
                error->message = "parent is not root or TC";
                return -EINVAL;
        }
        /* check level */
        if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
            level_id != (uint32_t)parent_node_type + 1) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                error->message = "Wrong level";
                return -EINVAL;
        }

        /* check the node number */
        if (parent_node_type == IAVF_TM_NODE_TYPE_PORT) {
                /* check the TC number */
                if (vf->tm_conf.nb_tc_node >= tc_nb) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                        error->message = "too many TCs";
                        return -EINVAL;
                }
        } else {
                /* check the queue number */
                if (parent_node->reference_count >= vf->num_queue_pairs) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                        error->message = "too many queues";
                        return -EINVAL;
                }
                if (node_id >= vf->num_queue_pairs) {
                        error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                        error->message = "queue id too large";
                        return -EINVAL;
                }
        }

        /* add the TC or queue node */
        tm_node = rte_zmalloc("iavf_tm_node",
                              sizeof(struct iavf_tm_node),
                              0);
        if (!tm_node)
                return -ENOMEM;
        tm_node->id = node_id;
        tm_node->reference_count = 0;
        tm_node->parent = parent_node;
        tm_node->shaper_profile = shaper_profile;
        rte_memcpy(&tm_node->params, params,
                         sizeof(struct rte_tm_node_params));
        if (parent_node_type == IAVF_TM_NODE_TYPE_PORT) {
                TAILQ_INSERT_TAIL(&vf->tm_conf.tc_list,
                                  tm_node, node);
                tm_node->tc = vf->tm_conf.nb_tc_node;
                vf->tm_conf.nb_tc_node++;
        } else {
                TAILQ_INSERT_TAIL(&vf->tm_conf.queue_list,
                                  tm_node, node);
                tm_node->tc = parent_node->tc;
                vf->tm_conf.nb_queue_node++;
        }
        tm_node->parent->reference_count++;

        /* increase the reference counter of the shaper profile */
        if (shaper_profile)
                shaper_profile->reference_count++;

        return 0;
}

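/* rte_tm callback: remove a cached node. Deletion is only allowed before
 * commit, and nodes must be deleted leaf to root: reference_count tracks
 * the number of children, and a node that still has children is refused.
 */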
static int
iavf_tm_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
                 struct rte_tm_error *error)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        enum iavf_tm_node_type node_type = IAVF_TM_NODE_TYPE_MAX;
        struct iavf_tm_node *tm_node;

        if (!error)
                return -EINVAL;

        /* if already committed */
        if (vf->tm_conf.committed) {
                error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
                error->message = "already committed";
                return -EINVAL;
        }

        if (node_id == RTE_TM_NODE_ID_NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid node id";
                return -EINVAL;
        }

        /* check if the node id exists */
        tm_node = iavf_tm_node_search(dev, node_id, &node_type);
        if (!tm_node) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        /* the node should have no child */
        if (tm_node->reference_count) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message =
                        "cannot delete a node which has children";
                return -EINVAL;
        }

        /* root node */
        if (node_type == IAVF_TM_NODE_TYPE_PORT) {
                rte_free(tm_node);
                vf->tm_conf.root = NULL;
                return 0;
        }

        /* TC or queue node */
        tm_node->parent->reference_count--;
        if (node_type == IAVF_TM_NODE_TYPE_TC) {
                TAILQ_REMOVE(&vf->tm_conf.tc_list, tm_node, node);
                vf->tm_conf.nb_tc_node--;
        } else {
                TAILQ_REMOVE(&vf->tm_conf.queue_list, tm_node, node);
                vf->tm_conf.nb_queue_node--;
        }
        rte_free(tm_node);

        return 0;
}

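/* Only the committed/peak rates of a profile are honoured by this driver;
 * token-bucket sizes and packet length adjustment are rejected.
 */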
static int
iavf_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
                                struct rte_tm_error *error)
{
        /* min bucket size not supported */
        if (profile->committed.size) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
                error->message = "committed bucket size not supported";
                return -EINVAL;
        }
        /* max bucket size not supported */
        if (profile->peak.size) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
                error->message = "peak bucket size not supported";
                return -EINVAL;
        }
        /* length adjustment not supported */
        if (profile->pkt_length_adjust) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
                error->message = "packet length adjustment not supported";
                return -EINVAL;
        }

        return 0;
}

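/* rte_tm callback: validate and cache a shaper profile. The profile takes
 * effect only when a node referencing it is committed.
 */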
static int
iavf_shaper_profile_add(struct rte_eth_dev *dev,
                        uint32_t shaper_profile_id,
                        struct rte_tm_shaper_params *profile,
                        struct rte_tm_error *error)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct iavf_tm_shaper_profile *shaper_profile;
        int ret;

        if (!profile || !error)
                return -EINVAL;

        ret = iavf_shaper_profile_param_check(profile, error);
        if (ret)
                return ret;

        shaper_profile = iavf_shaper_profile_search(dev, shaper_profile_id);

        if (shaper_profile) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
                error->message = "profile ID already exists";
                return -EINVAL;
        }

        shaper_profile = rte_zmalloc("iavf_tm_shaper_profile",
                                     sizeof(struct iavf_tm_shaper_profile),
                                     0);
        if (!shaper_profile)
                return -ENOMEM;
        shaper_profile->shaper_profile_id = shaper_profile_id;
        rte_memcpy(&shaper_profile->profile, profile,
                         sizeof(struct rte_tm_shaper_params));
        TAILQ_INSERT_TAIL(&vf->tm_conf.shaper_profile_list,
                          shaper_profile, node);

        return 0;
}

static int
iavf_shaper_profile_del(struct rte_eth_dev *dev,
                        uint32_t shaper_profile_id,
                        struct rte_tm_error *error)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct iavf_tm_shaper_profile *shaper_profile;

        if (!error)
                return -EINVAL;

        shaper_profile = iavf_shaper_profile_search(dev, shaper_profile_id);

        if (!shaper_profile) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
                error->message = "profile ID does not exist";
                return -EINVAL;
        }

        /* don't delete a profile if it's used by one or several nodes */
        if (shaper_profile->reference_count) {
                error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
                error->message = "profile in use";
                return -EINVAL;
        }

        TAILQ_REMOVE(&vf->tm_conf.shaper_profile_list, shaper_profile, node);
        rte_free(shaper_profile);

        return 0;
}

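/* rte_tm callback: report port-wide capabilities. Shaper rates are
 * expressed in bytes per second, hence the conversion below from
 * link_speed (Mbps): rate_max = link_speed * 1000000 / 8.
 */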
static int
iavf_tm_capabilities_get(struct rte_eth_dev *dev,
                         struct rte_tm_capabilities *cap,
                         struct rte_tm_error *error)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        uint16_t tc_nb = vf->qos_cap->num_elem;

        if (!cap || !error)
                return -EINVAL;

        if (tc_nb > vf->vf_res->num_queue_pairs)
                return -EINVAL;

        error->type = RTE_TM_ERROR_TYPE_NONE;

        /* set all the parameters to 0 first. */
        memset(cap, 0, sizeof(struct rte_tm_capabilities));

        /**
         * Supported hierarchy: port + TCs + queues.
         * These are the maximum capabilities, not the current configuration.
         */
        cap->n_nodes_max = 1 + IAVF_MAX_TRAFFIC_CLASS
                + vf->num_queue_pairs;
        cap->n_levels_max = 3; /* port, TC, queue */
        cap->non_leaf_nodes_identical = 1;
        cap->leaf_nodes_identical = 1;
        cap->shaper_n_max = cap->n_nodes_max;
        cap->shaper_private_n_max = cap->n_nodes_max;
        cap->shaper_private_dual_rate_n_max = 0;
        cap->shaper_private_rate_min = 0;
        /* Bytes per second */
        cap->shaper_private_rate_max =
                (uint64_t)vf->link_speed * 1000000 / IAVF_BITS_PER_BYTE;
        cap->shaper_private_packet_mode_supported = 0;
        cap->shaper_private_byte_mode_supported = 1;
        cap->shaper_shared_n_max = 0;
        cap->shaper_shared_n_nodes_per_shaper_max = 0;
        cap->shaper_shared_n_shapers_per_node_max = 0;
        cap->shaper_shared_dual_rate_n_max = 0;
        cap->shaper_shared_rate_min = 0;
        cap->shaper_shared_rate_max = 0;
        cap->shaper_shared_packet_mode_supported = 0;
        cap->shaper_shared_byte_mode_supported = 0;
        cap->sched_n_children_max = vf->num_queue_pairs;
        cap->sched_sp_n_priorities_max = 1;
        cap->sched_wfq_n_children_per_group_max = 0;
        cap->sched_wfq_n_groups_max = 0;
        cap->sched_wfq_weight_max = 1;
        cap->sched_wfq_packet_mode_supported = 0;
        cap->sched_wfq_byte_mode_supported = 0;
        cap->cman_head_drop_supported = 0;
        cap->dynamic_update_mask = 0;
        cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
        cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
        cap->cman_wred_context_n_max = 0;
        cap->cman_wred_context_private_n_max = 0;
        cap->cman_wred_context_shared_n_max = 0;
        cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
        cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
        cap->stats_mask = 0;

        return 0;
}

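/* rte_tm callback: per-level capabilities for the three levels
 * (port, TC, queue). Only queue-level nodes are leaves.
 */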
static int
iavf_level_capabilities_get(struct rte_eth_dev *dev,
                            uint32_t level_id,
                            struct rte_tm_level_capabilities *cap,
                            struct rte_tm_error *error)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

        if (!cap || !error)
                return -EINVAL;

        if (level_id >= IAVF_TM_NODE_TYPE_MAX) {
                error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
                error->message = "level too deep";
                return -EINVAL;
        }

        /* root node */
        if (level_id == IAVF_TM_NODE_TYPE_PORT) {
                cap->n_nodes_max = 1;
                cap->n_nodes_nonleaf_max = 1;
                cap->n_nodes_leaf_max = 0;
        } else if (level_id == IAVF_TM_NODE_TYPE_TC) {
                /* TC */
                cap->n_nodes_max = IAVF_MAX_TRAFFIC_CLASS;
                cap->n_nodes_nonleaf_max = IAVF_MAX_TRAFFIC_CLASS;
                cap->n_nodes_leaf_max = 0;
        } else {
                /* queue */
                cap->n_nodes_max = vf->num_queue_pairs;
                cap->n_nodes_nonleaf_max = 0;
                cap->n_nodes_leaf_max = vf->num_queue_pairs;
        }

        cap->non_leaf_nodes_identical = true;
        cap->leaf_nodes_identical = true;

        if (level_id != IAVF_TM_NODE_TYPE_QUEUE) {
                cap->nonleaf.shaper_private_supported = true;
                cap->nonleaf.shaper_private_dual_rate_supported = false;
                cap->nonleaf.shaper_private_rate_min = 0;
                /* Bytes per second */
                cap->nonleaf.shaper_private_rate_max =
                        (uint64_t)vf->link_speed * 1000000 / IAVF_BITS_PER_BYTE;
                cap->nonleaf.shaper_private_packet_mode_supported = 0;
                cap->nonleaf.shaper_private_byte_mode_supported = 1;
                cap->nonleaf.shaper_shared_n_max = 0;
                cap->nonleaf.shaper_shared_packet_mode_supported = 0;
                cap->nonleaf.shaper_shared_byte_mode_supported = 0;
                if (level_id == IAVF_TM_NODE_TYPE_PORT)
                        cap->nonleaf.sched_n_children_max =
                                IAVF_MAX_TRAFFIC_CLASS;
                else
                        cap->nonleaf.sched_n_children_max =
                                vf->num_queue_pairs;
                cap->nonleaf.sched_sp_n_priorities_max = 1;
                cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
                cap->nonleaf.sched_wfq_n_groups_max = 0;
                cap->nonleaf.sched_wfq_weight_max = 1;
                cap->nonleaf.sched_wfq_packet_mode_supported = 0;
                cap->nonleaf.sched_wfq_byte_mode_supported = 0;
                cap->nonleaf.stats_mask = 0;

                return 0;
        }

        /* queue node */
        cap->leaf.shaper_private_supported = false;
        cap->leaf.shaper_private_dual_rate_supported = false;
        cap->leaf.shaper_private_rate_min = 0;
        /* Bytes per second */
        cap->leaf.shaper_private_rate_max =
                (uint64_t)vf->link_speed * 1000000 / IAVF_BITS_PER_BYTE;
        cap->leaf.shaper_private_packet_mode_supported = 0;
        cap->leaf.shaper_private_byte_mode_supported = 1;
        cap->leaf.shaper_shared_n_max = 0;
        cap->leaf.shaper_shared_packet_mode_supported = 0;
        cap->leaf.shaper_shared_byte_mode_supported = 0;
        cap->leaf.cman_head_drop_supported = false;
        cap->leaf.cman_wred_context_private_supported = true;
        cap->leaf.cman_wred_context_shared_n_max = 0;
        cap->leaf.stats_mask = 0;

        return 0;
}

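/* rte_tm callback: per-node capabilities. Only TC nodes are supported;
 * the TC shaper rates come from the PF-provided qos_cap element, reported
 * in kbps (hence the * 1000 / 8 conversion to bytes per second below).
 */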
static int
iavf_node_capabilities_get(struct rte_eth_dev *dev,
                           uint32_t node_id,
                           struct rte_tm_node_capabilities *cap,
                           struct rte_tm_error *error)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        enum iavf_tm_node_type node_type;
        struct virtchnl_qos_cap_elem tc_cap;
        struct iavf_tm_node *tm_node;

        if (!cap || !error)
                return -EINVAL;

        if (node_id == RTE_TM_NODE_ID_NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid node id";
                return -EINVAL;
        }

        /* check if the node id exists */
        tm_node = iavf_tm_node_search(dev, node_id, &node_type);
        if (!tm_node) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        if (node_type != IAVF_TM_NODE_TYPE_TC) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                error->message = "capability get only supported on TC nodes";
                return -EINVAL;
        }

        tc_cap = vf->qos_cap->cap[tm_node->tc];
        if (tc_cap.tc_num != tm_node->tc) {
                error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
                error->message = "TC number does not match";
                return -EINVAL;
        }

        cap->shaper_private_supported = true;
        cap->shaper_private_dual_rate_supported = false;
        /* Bytes per second */
        cap->shaper_private_rate_min =
                (uint64_t)tc_cap.shaper.committed * 1000 / IAVF_BITS_PER_BYTE;
        cap->shaper_private_rate_max =
                (uint64_t)tc_cap.shaper.peak * 1000 / IAVF_BITS_PER_BYTE;
        cap->shaper_shared_n_max = 0;
        cap->nonleaf.sched_n_children_max = vf->num_queue_pairs;
        cap->nonleaf.sched_sp_n_priorities_max = 1;
        cap->nonleaf.sched_wfq_n_children_per_group_max = 1;
        cap->nonleaf.sched_wfq_n_groups_max = 0;
        cap->nonleaf.sched_wfq_weight_max = tc_cap.weight;
        cap->stats_mask = 0;

        return 0;
}

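/* rte_tm callback: translate the cached hierarchy into virtchnl messages.
 * The port must be stopped and the PF must advertise
 * VIRTCHNL_VF_OFFLOAD_QOS. Two messages are built, each sized for its
 * trailing flexible array: a per-queue bandwidth config (from the queue
 * nodes' shaper profiles) and a queue-to-TC mapping (queue counts per TC,
 * with start ids assigned in order). On success the mapping is stored in
 * vf->qtc_map and the configuration is frozen; on failure, clear_on_fail
 * resets the whole TM configuration.
 */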
static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
                                 int clear_on_fail,
                                 __rte_unused struct rte_tm_error *error)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct virtchnl_queue_tc_mapping *q_tc_mapping = NULL;
        struct virtchnl_queues_bw_cfg *q_bw = NULL;
        struct iavf_tm_node_list *queue_list = &vf->tm_conf.queue_list;
        struct iavf_tm_node *tm_node;
        struct iavf_qtc_map *qtc_map;
        uint16_t size, size_q;
        int index = 0, node_committed = 0;
        int i, ret_val = IAVF_SUCCESS;

        /* check if port is stopped */
        if (adapter->stopped != 1) {
                PMD_DRV_LOG(ERR, "Please stop port first");
                ret_val = IAVF_ERR_NOT_READY;
                goto err;
        }

        if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)) {
                PMD_DRV_LOG(ERR, "VF queue tc mapping is not supported");
                ret_val = IAVF_NOT_SUPPORTED;
                goto fail_clear;
        }

        /* check if all TC nodes are set with VF vsi */
        if (vf->tm_conf.nb_tc_node != vf->qos_cap->num_elem) {
                PMD_DRV_LOG(ERR, "Not all TCs have a VF vsi node set");
                ret_val = IAVF_ERR_PARAM;
                goto fail_clear;
        }

        size = sizeof(*q_tc_mapping) + sizeof(q_tc_mapping->tc[0]) *
                (vf->qos_cap->num_elem - 1);
        q_tc_mapping = rte_zmalloc("q_tc", size, 0);
        if (!q_tc_mapping) {
                ret_val = IAVF_ERR_NO_MEMORY;
                goto fail_clear;
        }

        size_q = sizeof(*q_bw) + sizeof(q_bw->cfg[0]) *
                (vf->num_queue_pairs - 1);
        q_bw = rte_zmalloc("q_bw", size_q, 0);
        if (!q_bw) {
                ret_val = IAVF_ERR_NO_MEMORY;
                goto fail_clear;
        }

        q_tc_mapping->vsi_id = vf->vsi.vsi_id;
        q_tc_mapping->num_tc = vf->qos_cap->num_elem;
        q_tc_mapping->num_queue_pairs = vf->num_queue_pairs;

        q_bw->vsi_id = vf->vsi.vsi_id;
        q_bw->num_queues = vf->num_queue_pairs;

        TAILQ_FOREACH(tm_node, queue_list, node) {
                if (tm_node->tc >= q_tc_mapping->num_tc) {
                        PMD_DRV_LOG(ERR, "TC%d is not enabled", tm_node->tc);
                        ret_val = IAVF_ERR_PARAM;
                        goto fail_clear;
                }
                q_tc_mapping->tc[tm_node->tc].req.queue_count++;

                if (tm_node->shaper_profile) {
                        q_bw->cfg[node_committed].queue_id = node_committed;
                        q_bw->cfg[node_committed].shaper.peak =
                        tm_node->shaper_profile->profile.peak.rate /
                        1000 * IAVF_BITS_PER_BYTE;
                        q_bw->cfg[node_committed].shaper.committed =
                        tm_node->shaper_profile->profile.committed.rate /
                        1000 * IAVF_BITS_PER_BYTE;
                        q_bw->cfg[node_committed].tc = tm_node->tc;
                }

                node_committed++;
        }

        /* All queues allocated to this VF should be mapped */
        if (node_committed < vf->num_queue_pairs) {
                PMD_DRV_LOG(ERR, "fewer queue nodes than allocated queue pairs");
                ret_val = IAVF_ERR_PARAM;
                goto fail_clear;
        }

        ret_val = iavf_set_q_bw(dev, q_bw, size_q);
        if (ret_val)
                goto fail_clear;

        /* store the queue TC mapping info */
        qtc_map = rte_zmalloc("qtc_map",
                sizeof(struct iavf_qtc_map) * q_tc_mapping->num_tc, 0);
        if (!qtc_map) {
                ret_val = IAVF_ERR_NO_MEMORY;
                goto fail_clear;
        }

        for (i = 0; i < q_tc_mapping->num_tc; i++) {
                q_tc_mapping->tc[i].req.start_queue_id = index;
                index += q_tc_mapping->tc[i].req.queue_count;
                qtc_map[i].tc = i;
                qtc_map[i].start_queue_id =
                        q_tc_mapping->tc[i].req.start_queue_id;
                qtc_map[i].queue_count = q_tc_mapping->tc[i].req.queue_count;
        }

        ret_val = iavf_set_q_tc_map(dev, q_tc_mapping, size);
        if (ret_val) {
                rte_free(qtc_map);
                goto fail_clear;
        }

        /* the virtchnl buffers are no longer needed once the messages
         * have been sent
         */
        rte_free(q_tc_mapping);
        rte_free(q_bw);
        vf->qtc_map = qtc_map;
        vf->tm_conf.committed = true;
        return ret_val;

fail_clear:
        rte_free(q_tc_mapping);
        rte_free(q_bw);
        /* clear all the traffic manager configuration */
        if (clear_on_fail) {
                iavf_tm_conf_uninit(dev);
                iavf_tm_conf_init(dev);
        }
err:
        return ret_val;
}