net/ixgbe: support getting TM level capability
[dpdk.git] / drivers / net / ixgbe / ixgbe_tm.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <rte_malloc.h>
35
36 #include "ixgbe_ethdev.h"
37
38 static int ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
39                                      struct rte_tm_capabilities *cap,
40                                      struct rte_tm_error *error);
41 static int ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
42                                     uint32_t shaper_profile_id,
43                                     struct rte_tm_shaper_params *profile,
44                                     struct rte_tm_error *error);
45 static int ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
46                                     uint32_t shaper_profile_id,
47                                     struct rte_tm_error *error);
48 static int ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
49                           uint32_t parent_node_id, uint32_t priority,
50                           uint32_t weight, uint32_t level_id,
51                           struct rte_tm_node_params *params,
52                           struct rte_tm_error *error);
53 static int ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
54                              struct rte_tm_error *error);
55 static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
56                                int *is_leaf, struct rte_tm_error *error);
57 static int ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
58                                         uint32_t level_id,
59                                         struct rte_tm_level_capabilities *cap,
60                                         struct rte_tm_error *error);
61
/* Traffic management callbacks registered with the generic rte_tm API;
 * returned to the ethdev layer by ixgbe_tm_ops_get().
 */
const struct rte_tm_ops ixgbe_tm_ops = {
	.capabilities_get = ixgbe_tm_capabilities_get,
	.shaper_profile_add = ixgbe_shaper_profile_add,
	.shaper_profile_delete = ixgbe_shaper_profile_del,
	.node_add = ixgbe_node_add,
	.node_delete = ixgbe_node_delete,
	.node_type_get = ixgbe_node_type_get,
	.level_capabilities_get = ixgbe_level_capabilities_get,
};
71
72 int
73 ixgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
74                  void *arg)
75 {
76         if (!arg)
77                 return -EINVAL;
78
79         *(const void **)arg = &ixgbe_tm_ops;
80
81         return 0;
82 }
83
84 void
85 ixgbe_tm_conf_init(struct rte_eth_dev *dev)
86 {
87         struct ixgbe_tm_conf *tm_conf =
88                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
89
90         /* initialize shaper profile list */
91         TAILQ_INIT(&tm_conf->shaper_profile_list);
92
93         /* initialize node configuration */
94         tm_conf->root = NULL;
95         TAILQ_INIT(&tm_conf->queue_list);
96         TAILQ_INIT(&tm_conf->tc_list);
97         tm_conf->nb_tc_node = 0;
98         tm_conf->nb_queue_node = 0;
99         tm_conf->committed = false;
100 }
101
102 void
103 ixgbe_tm_conf_uninit(struct rte_eth_dev *dev)
104 {
105         struct ixgbe_tm_conf *tm_conf =
106                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
107         struct ixgbe_tm_shaper_profile *shaper_profile;
108         struct ixgbe_tm_node *tm_node;
109
110         /* clear node configuration */
111         while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) {
112                 TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
113                 rte_free(tm_node);
114         }
115         tm_conf->nb_queue_node = 0;
116         while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) {
117                 TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
118                 rte_free(tm_node);
119         }
120         tm_conf->nb_tc_node = 0;
121         if (tm_conf->root) {
122                 rte_free(tm_conf->root);
123                 tm_conf->root = NULL;
124         }
125
126         /* Remove all shaper profiles */
127         while ((shaper_profile =
128                TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
129                 TAILQ_REMOVE(&tm_conf->shaper_profile_list,
130                              shaper_profile, node);
131                 rte_free(shaper_profile);
132         }
133 }
134
135 static inline uint8_t
136 ixgbe_tc_nb_get(struct rte_eth_dev *dev)
137 {
138         struct rte_eth_conf *eth_conf;
139         uint8_t nb_tcs = 0;
140
141         eth_conf = &dev->data->dev_conf;
142         if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
143                 nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
144         } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
145                 if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
146                     ETH_32_POOLS)
147                         nb_tcs = ETH_4_TCS;
148                 else
149                         nb_tcs = ETH_8_TCS;
150         } else {
151                 nb_tcs = 1;
152         }
153
154         return nb_tcs;
155 }
156
157 static int
158 ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
159                           struct rte_tm_capabilities *cap,
160                           struct rte_tm_error *error)
161 {
162         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
163         uint8_t tc_nb = ixgbe_tc_nb_get(dev);
164
165         if (!cap || !error)
166                 return -EINVAL;
167
168         if (tc_nb > hw->mac.max_tx_queues)
169                 return -EINVAL;
170
171         error->type = RTE_TM_ERROR_TYPE_NONE;
172
173         /* set all the parameters to 0 first. */
174         memset(cap, 0, sizeof(struct rte_tm_capabilities));
175
176         /**
177          * here is the max capability not the current configuration.
178          */
179         /* port + TCs + queues */
180         cap->n_nodes_max = 1 + IXGBE_DCB_MAX_TRAFFIC_CLASS +
181                            hw->mac.max_tx_queues;
182         cap->n_levels_max = 3;
183         cap->non_leaf_nodes_identical = 1;
184         cap->leaf_nodes_identical = 1;
185         cap->shaper_n_max = cap->n_nodes_max;
186         cap->shaper_private_n_max = cap->n_nodes_max;
187         cap->shaper_private_dual_rate_n_max = 0;
188         cap->shaper_private_rate_min = 0;
189         /* 10Gbps -> 1.25GBps */
190         cap->shaper_private_rate_max = 1250000000ull;
191         cap->shaper_shared_n_max = 0;
192         cap->shaper_shared_n_nodes_per_shaper_max = 0;
193         cap->shaper_shared_n_shapers_per_node_max = 0;
194         cap->shaper_shared_dual_rate_n_max = 0;
195         cap->shaper_shared_rate_min = 0;
196         cap->shaper_shared_rate_max = 0;
197         cap->sched_n_children_max = hw->mac.max_tx_queues;
198         /**
199          * HW supports SP. But no plan to support it now.
200          * So, all the nodes should have the same priority.
201          */
202         cap->sched_sp_n_priorities_max = 1;
203         cap->sched_wfq_n_children_per_group_max = 0;
204         cap->sched_wfq_n_groups_max = 0;
205         /**
206          * SW only supports fair round robin now.
207          * So, all the nodes should have the same weight.
208          */
209         cap->sched_wfq_weight_max = 1;
210         cap->cman_head_drop_supported = 0;
211         cap->dynamic_update_mask = 0;
212         cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
213         cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
214         cap->cman_wred_context_n_max = 0;
215         cap->cman_wred_context_private_n_max = 0;
216         cap->cman_wred_context_shared_n_max = 0;
217         cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
218         cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
219         cap->stats_mask = 0;
220
221         return 0;
222 }
223
224 static inline struct ixgbe_tm_shaper_profile *
225 ixgbe_shaper_profile_search(struct rte_eth_dev *dev,
226                             uint32_t shaper_profile_id)
227 {
228         struct ixgbe_tm_conf *tm_conf =
229                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
230         struct ixgbe_shaper_profile_list *shaper_profile_list =
231                 &tm_conf->shaper_profile_list;
232         struct ixgbe_tm_shaper_profile *shaper_profile;
233
234         TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
235                 if (shaper_profile_id == shaper_profile->shaper_profile_id)
236                         return shaper_profile;
237         }
238
239         return NULL;
240 }
241
242 static int
243 ixgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
244                                  struct rte_tm_error *error)
245 {
246         /* min rate not supported */
247         if (profile->committed.rate) {
248                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
249                 error->message = "committed rate not supported";
250                 return -EINVAL;
251         }
252         /* min bucket size not supported */
253         if (profile->committed.size) {
254                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
255                 error->message = "committed bucket size not supported";
256                 return -EINVAL;
257         }
258         /* max bucket size not supported */
259         if (profile->peak.size) {
260                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
261                 error->message = "peak bucket size not supported";
262                 return -EINVAL;
263         }
264         /* length adjustment not supported */
265         if (profile->pkt_length_adjust) {
266                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
267                 error->message = "packet length adjustment not supported";
268                 return -EINVAL;
269         }
270
271         return 0;
272 }
273
274 static int
275 ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
276                          uint32_t shaper_profile_id,
277                          struct rte_tm_shaper_params *profile,
278                          struct rte_tm_error *error)
279 {
280         struct ixgbe_tm_conf *tm_conf =
281                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
282         struct ixgbe_tm_shaper_profile *shaper_profile;
283         int ret;
284
285         if (!profile || !error)
286                 return -EINVAL;
287
288         ret = ixgbe_shaper_profile_param_check(profile, error);
289         if (ret)
290                 return ret;
291
292         shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);
293
294         if (shaper_profile) {
295                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
296                 error->message = "profile ID exist";
297                 return -EINVAL;
298         }
299
300         shaper_profile = rte_zmalloc("ixgbe_tm_shaper_profile",
301                                      sizeof(struct ixgbe_tm_shaper_profile),
302                                      0);
303         if (!shaper_profile)
304                 return -ENOMEM;
305         shaper_profile->shaper_profile_id = shaper_profile_id;
306         (void)rte_memcpy(&shaper_profile->profile, profile,
307                          sizeof(struct rte_tm_shaper_params));
308         TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
309                           shaper_profile, node);
310
311         return 0;
312 }
313
314 static int
315 ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
316                          uint32_t shaper_profile_id,
317                          struct rte_tm_error *error)
318 {
319         struct ixgbe_tm_conf *tm_conf =
320                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
321         struct ixgbe_tm_shaper_profile *shaper_profile;
322
323         if (!error)
324                 return -EINVAL;
325
326         shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);
327
328         if (!shaper_profile) {
329                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
330                 error->message = "profile ID not exist";
331                 return -EINVAL;
332         }
333
334         /* don't delete a profile if it's used by one or several nodes */
335         if (shaper_profile->reference_count) {
336                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
337                 error->message = "profile in use";
338                 return -EINVAL;
339         }
340
341         TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node);
342         rte_free(shaper_profile);
343
344         return 0;
345 }
346
347 static inline struct ixgbe_tm_node *
348 ixgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
349                      enum ixgbe_tm_node_type *node_type)
350 {
351         struct ixgbe_tm_conf *tm_conf =
352                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
353         struct ixgbe_tm_node *tm_node;
354
355         if (tm_conf->root && tm_conf->root->id == node_id) {
356                 *node_type = IXGBE_TM_NODE_TYPE_PORT;
357                 return tm_conf->root;
358         }
359
360         TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
361                 if (tm_node->id == node_id) {
362                         *node_type = IXGBE_TM_NODE_TYPE_TC;
363                         return tm_node;
364                 }
365         }
366
367         TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
368                 if (tm_node->id == node_id) {
369                         *node_type = IXGBE_TM_NODE_TYPE_QUEUE;
370                         return tm_node;
371                 }
372         }
373
374         return NULL;
375 }
376
377 static void
378 ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
379                         uint16_t *base, uint16_t *nb)
380 {
381         uint8_t nb_tcs = ixgbe_tc_nb_get(dev);
382         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
383         uint16_t vf_num = pci_dev->max_vfs;
384
385         *base = 0;
386         *nb = 0;
387
388         /* VT on */
389         if (vf_num) {
390                 /* no DCB */
391                 if (nb_tcs == 1) {
392                         if (vf_num >= ETH_32_POOLS) {
393                                 *nb = 2;
394                                 *base = vf_num * 2;
395                         } else if (vf_num >= ETH_16_POOLS) {
396                                 *nb = 4;
397                                 *base = vf_num * 4;
398                         } else {
399                                 *nb = 8;
400                                 *base = vf_num * 8;
401                         }
402                 } else {
403                         /* DCB */
404                         *nb = 1;
405                         *base = vf_num * nb_tcs + tc_node_no;
406                 }
407         } else {
408                 /* VT off */
409                 if (nb_tcs == ETH_8_TCS) {
410                         switch (tc_node_no) {
411                         case 0:
412                                 *base = 0;
413                                 *nb = 32;
414                                 break;
415                         case 1:
416                                 *base = 32;
417                                 *nb = 32;
418                                 break;
419                         case 2:
420                                 *base = 64;
421                                 *nb = 16;
422                                 break;
423                         case 3:
424                                 *base = 80;
425                                 *nb = 16;
426                                 break;
427                         case 4:
428                                 *base = 96;
429                                 *nb = 8;
430                                 break;
431                         case 5:
432                                 *base = 104;
433                                 *nb = 8;
434                                 break;
435                         case 6:
436                                 *base = 112;
437                                 *nb = 8;
438                                 break;
439                         case 7:
440                                 *base = 120;
441                                 *nb = 8;
442                                 break;
443                         default:
444                                 return;
445                         }
446                 } else {
447                         switch (tc_node_no) {
448                         /**
449                          * If no VF and no DCB, only 64 queues can be used.
450                          * This case also be covered by this "case 0".
451                          */
452                         case 0:
453                                 *base = 0;
454                                 *nb = 64;
455                                 break;
456                         case 1:
457                                 *base = 64;
458                                 *nb = 32;
459                                 break;
460                         case 2:
461                                 *base = 96;
462                                 *nb = 16;
463                                 break;
464                         case 3:
465                                 *base = 112;
466                                 *nb = 16;
467                                 break;
468                         default:
469                                 return;
470                         }
471                 }
472         }
473 }
474
475 static int
476 ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id,
477                        uint32_t priority, uint32_t weight,
478                        struct rte_tm_node_params *params,
479                        struct rte_tm_error *error)
480 {
481         if (node_id == RTE_TM_NODE_ID_NULL) {
482                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
483                 error->message = "invalid node id";
484                 return -EINVAL;
485         }
486
487         if (priority) {
488                 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
489                 error->message = "priority should be 0";
490                 return -EINVAL;
491         }
492
493         if (weight != 1) {
494                 error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
495                 error->message = "weight must be 1";
496                 return -EINVAL;
497         }
498
499         /* not support shared shaper */
500         if (params->shared_shaper_id) {
501                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
502                 error->message = "shared shaper not supported";
503                 return -EINVAL;
504         }
505         if (params->n_shared_shapers) {
506                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
507                 error->message = "shared shaper not supported";
508                 return -EINVAL;
509         }
510
511         /* for root node */
512         if (parent_node_id == RTE_TM_NODE_ID_NULL) {
513                 /* check the unsupported parameters */
514                 if (params->nonleaf.wfq_weight_mode) {
515                         error->type =
516                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
517                         error->message = "WFQ not supported";
518                         return -EINVAL;
519                 }
520                 if (params->nonleaf.n_sp_priorities != 1) {
521                         error->type =
522                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
523                         error->message = "SP priority not supported";
524                         return -EINVAL;
525                 } else if (params->nonleaf.wfq_weight_mode &&
526                            !(*params->nonleaf.wfq_weight_mode)) {
527                         error->type =
528                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
529                         error->message = "WFP should be byte mode";
530                         return -EINVAL;
531                 }
532
533                 return 0;
534         }
535
536         /* for TC or queue node */
537         /* check the unsupported parameters */
538         if (params->leaf.cman) {
539                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
540                 error->message = "Congestion management not supported";
541                 return -EINVAL;
542         }
543         if (params->leaf.wred.wred_profile_id !=
544             RTE_TM_WRED_PROFILE_ID_NONE) {
545                 error->type =
546                         RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
547                 error->message = "WRED not supported";
548                 return -EINVAL;
549         }
550         if (params->leaf.wred.shared_wred_context_id) {
551                 error->type =
552                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
553                 error->message = "WRED not supported";
554                 return -EINVAL;
555         }
556         if (params->leaf.wred.n_shared_wred_contexts) {
557                 error->type =
558                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
559                 error->message = "WRED not supported";
560                 return -EINVAL;
561         }
562
563         return 0;
564 }
565
/**
 * Add a node (port, TC, or queue level) to the TM hierarchy.
 *
 * Now the TC and queue configuration is controlled by DCB.
 * We need check if the node configuration follows the DCB configuration.
 * In the future, we may use TM to cover DCB.
 *
 * A node with parent_node_id == RTE_TM_NODE_ID_NULL becomes the root
 * (port) node; a node whose parent is the root becomes a TC node; a node
 * whose parent is a TC becomes a queue node, and its node_id is taken as
 * the TX queue id. Returns 0 on success, -EINVAL on validation failure
 * (with @error filled), -ENOMEM on allocation failure.
 */
static int
ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	       uint32_t parent_node_id, uint32_t priority,
	       uint32_t weight, uint32_t level_id,
	       struct rte_tm_node_params *params,
	       struct rte_tm_error *error)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
	enum ixgbe_tm_node_type parent_node_type = IXGBE_TM_NODE_TYPE_MAX;
	struct ixgbe_tm_shaper_profile *shaper_profile;
	struct ixgbe_tm_node *tm_node;
	struct ixgbe_tm_node *parent_node;
	uint8_t nb_tcs;
	uint16_t q_base = 0;
	uint16_t q_nb = 0;
	int ret;

	if (!params || !error)
		return -EINVAL;

	/* if already committed, the hierarchy is frozen */
	if (tm_conf->committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	/* generic parameter validation shared by all levels */
	ret = ixgbe_node_param_check(node_id, parent_node_id, priority, weight,
				     params, error);
	if (ret)
		return ret;

	/* check if the node ID is already used */
	if (ixgbe_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	/* check the shaper profile id; every node must reference one */
	shaper_profile = ixgbe_shaper_profile_search(dev,
						     params->shaper_profile_id);
	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
		error->message = "shaper profile not exist";
		return -EINVAL;
	}

	/* root node if not have a parent */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check level: root must be at the port level (or ANY) */
		if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
		    level_id > IXGBE_TM_NODE_TYPE_PORT) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "Wrong level";
			return -EINVAL;
		}

		/* obviously no more than one root */
		if (tm_conf->root) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "already have a root";
			return -EINVAL;
		}

		/* add the root node */
		tm_node = rte_zmalloc("ixgbe_tm_node",
				      sizeof(struct ixgbe_tm_node),
				      0);
		if (!tm_node)
			return -ENOMEM;
		tm_node->id = node_id;
		tm_node->priority = priority;
		tm_node->weight = weight;
		tm_node->reference_count = 0;
		tm_node->no = 0;
		tm_node->parent = NULL;
		tm_node->shaper_profile = shaper_profile;
		(void)rte_memcpy(&tm_node->params, params,
				 sizeof(struct rte_tm_node_params));
		tm_conf->root = tm_node;

		/* increase the reference counter of the shaper profile */
		shaper_profile->reference_count++;

		return 0;
	}

	/* TC or queue node */
	/* check the parent node */
	parent_node = ixgbe_tm_node_search(dev, parent_node_id,
					   &parent_node_type);
	if (!parent_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent not exist";
		return -EINVAL;
	}
	if (parent_node_type != IXGBE_TM_NODE_TYPE_PORT &&
	    parent_node_type != IXGBE_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}
	/* check level: child must sit exactly one level below its parent */
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != parent_node_type + 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "Wrong level";
		return -EINVAL;
	}

	/* check the node number against the DCB/queue limits */
	if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
		/* check TC number */
		nb_tcs = ixgbe_tc_nb_get(dev);
		if (tm_conf->nb_tc_node >= nb_tcs) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many TCs";
			return -EINVAL;
		}
	} else {
		/* check queue number */
		if (tm_conf->nb_queue_node >= dev->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues";
			return -EINVAL;
		}

		/* the parent TC only owns q_nb queues starting at q_base */
		ixgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb);
		if (parent_node->reference_count >= q_nb) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues than TC supported";
			return -EINVAL;
		}

		/**
		 * check the node id.
		 * For queue, the node id means queue id.
		 */
		if (node_id >= dev->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too large queue id";
			return -EINVAL;
		}
	}

	/* add the TC or queue node */
	tm_node = rte_zmalloc("ixgbe_tm_node",
			      sizeof(struct ixgbe_tm_node),
			      0);
	if (!tm_node)
		return -ENOMEM;
	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	tm_node->shaper_profile = shaper_profile;
	(void)rte_memcpy(&tm_node->params, params,
			 sizeof(struct rte_tm_node_params));
	if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
		/* 'no' is the TC index: the number of existing siblings */
		tm_node->no = parent_node->reference_count;
		TAILQ_INSERT_TAIL(&tm_conf->tc_list,
				  tm_node, node);
		tm_conf->nb_tc_node++;
	} else {
		/* 'no' is the absolute queue index within the TC's range */
		tm_node->no = q_base + parent_node->reference_count;
		TAILQ_INSERT_TAIL(&tm_conf->queue_list,
				  tm_node, node);
		tm_conf->nb_queue_node++;
	}
	tm_node->parent->reference_count++;

	/* increase the reference counter of the shaper profile */
	shaper_profile->reference_count++;

	return 0;
}
751
752 static int
753 ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
754                   struct rte_tm_error *error)
755 {
756         struct ixgbe_tm_conf *tm_conf =
757                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
758         enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
759         struct ixgbe_tm_node *tm_node;
760
761         if (!error)
762                 return -EINVAL;
763
764         /* if already committed */
765         if (tm_conf->committed) {
766                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
767                 error->message = "already committed";
768                 return -EINVAL;
769         }
770
771         if (node_id == RTE_TM_NODE_ID_NULL) {
772                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
773                 error->message = "invalid node id";
774                 return -EINVAL;
775         }
776
777         /* check the if the node id exists */
778         tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
779         if (!tm_node) {
780                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
781                 error->message = "no such node";
782                 return -EINVAL;
783         }
784
785         /* the node should have no child */
786         if (tm_node->reference_count) {
787                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
788                 error->message =
789                         "cannot delete a node which has children";
790                 return -EINVAL;
791         }
792
793         /* root node */
794         if (node_type == IXGBE_TM_NODE_TYPE_PORT) {
795                 tm_node->shaper_profile->reference_count--;
796                 rte_free(tm_node);
797                 tm_conf->root = NULL;
798                 return 0;
799         }
800
801         /* TC or queue node */
802         tm_node->shaper_profile->reference_count--;
803         tm_node->parent->reference_count--;
804         if (node_type == IXGBE_TM_NODE_TYPE_TC) {
805                 TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
806                 tm_conf->nb_tc_node--;
807         } else {
808                 TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
809                 tm_conf->nb_queue_node--;
810         }
811         rte_free(tm_node);
812
813         return 0;
814 }
815
816 static int
817 ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
818                     int *is_leaf, struct rte_tm_error *error)
819 {
820         enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
821         struct ixgbe_tm_node *tm_node;
822
823         if (!is_leaf || !error)
824                 return -EINVAL;
825
826         if (node_id == RTE_TM_NODE_ID_NULL) {
827                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
828                 error->message = "invalid node id";
829                 return -EINVAL;
830         }
831
832         /* check if the node id exists */
833         tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
834         if (!tm_node) {
835                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
836                 error->message = "no such node";
837                 return -EINVAL;
838         }
839
840         if (node_type == IXGBE_TM_NODE_TYPE_QUEUE)
841                 *is_leaf = true;
842         else
843                 *is_leaf = false;
844
845         return 0;
846 }
847
848 static int
849 ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
850                              uint32_t level_id,
851                              struct rte_tm_level_capabilities *cap,
852                              struct rte_tm_error *error)
853 {
854         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
855
856         if (!cap || !error)
857                 return -EINVAL;
858
859         if (level_id >= IXGBE_TM_NODE_TYPE_MAX) {
860                 error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
861                 error->message = "too deep level";
862                 return -EINVAL;
863         }
864
865         /* root node */
866         if (level_id == IXGBE_TM_NODE_TYPE_PORT) {
867                 cap->n_nodes_max = 1;
868                 cap->n_nodes_nonleaf_max = 1;
869                 cap->n_nodes_leaf_max = 0;
870                 cap->non_leaf_nodes_identical = true;
871                 cap->leaf_nodes_identical = true;
872                 cap->nonleaf.shaper_private_supported = true;
873                 cap->nonleaf.shaper_private_dual_rate_supported = false;
874                 cap->nonleaf.shaper_private_rate_min = 0;
875                 /* 10Gbps -> 1.25GBps */
876                 cap->nonleaf.shaper_private_rate_max = 1250000000ull;
877                 cap->nonleaf.shaper_shared_n_max = 0;
878                 cap->nonleaf.sched_n_children_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
879                 cap->nonleaf.sched_sp_n_priorities_max = 1;
880                 cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
881                 cap->nonleaf.sched_wfq_n_groups_max = 0;
882                 cap->nonleaf.sched_wfq_weight_max = 1;
883                 cap->nonleaf.stats_mask = 0;
884
885                 return 0;
886         }
887
888         /* TC or queue node */
889         if (level_id == IXGBE_TM_NODE_TYPE_TC) {
890                 /* TC */
891                 cap->n_nodes_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
892                 cap->n_nodes_nonleaf_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
893                 cap->n_nodes_leaf_max = 0;
894                 cap->non_leaf_nodes_identical = true;
895         } else {
896                 /* queue */
897                 cap->n_nodes_max = hw->mac.max_tx_queues;
898                 cap->n_nodes_nonleaf_max = 0;
899                 cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
900                 cap->non_leaf_nodes_identical = true;
901         }
902         cap->leaf_nodes_identical = true;
903         cap->leaf.shaper_private_supported = true;
904         cap->leaf.shaper_private_dual_rate_supported = false;
905         cap->leaf.shaper_private_rate_min = 0;
906         /* 10Gbps -> 1.25GBps */
907         cap->leaf.shaper_private_rate_max = 1250000000ull;
908         cap->leaf.shaper_shared_n_max = 0;
909         cap->leaf.cman_head_drop_supported = false;
910         cap->leaf.cman_wred_context_private_supported = true;
911         cap->leaf.cman_wred_context_shared_n_max = 0;
912         cap->leaf.stats_mask = 0;
913
914         return 0;
915 }