net/ixgbe: support adding TM node
[dpdk.git] / drivers / net / ixgbe / ixgbe_tm.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <rte_malloc.h>
35
36 #include "ixgbe_ethdev.h"
37
/* Forward declarations for the rte_tm_ops callbacks defined below. */
static int ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
				     struct rte_tm_capabilities *cap,
				     struct rte_tm_error *error);
static int ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
				    uint32_t shaper_profile_id,
				    struct rte_tm_shaper_params *profile,
				    struct rte_tm_error *error);
static int ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
				    uint32_t shaper_profile_id,
				    struct rte_tm_error *error);
static int ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
			  uint32_t parent_node_id, uint32_t priority,
			  uint32_t weight, uint32_t level_id,
			  struct rte_tm_node_params *params,
			  struct rte_tm_error *error);

/* Traffic management ops table handed out by ixgbe_tm_ops_get(). */
const struct rte_tm_ops ixgbe_tm_ops = {
	.capabilities_get = ixgbe_tm_capabilities_get,
	.shaper_profile_add = ixgbe_shaper_profile_add,
	.shaper_profile_delete = ixgbe_shaper_profile_del,
	.node_add = ixgbe_node_add,
};
60
61 int
62 ixgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
63                  void *arg)
64 {
65         if (!arg)
66                 return -EINVAL;
67
68         *(const void **)arg = &ixgbe_tm_ops;
69
70         return 0;
71 }
72
73 void
74 ixgbe_tm_conf_init(struct rte_eth_dev *dev)
75 {
76         struct ixgbe_tm_conf *tm_conf =
77                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
78
79         /* initialize shaper profile list */
80         TAILQ_INIT(&tm_conf->shaper_profile_list);
81
82         /* initialize node configuration */
83         tm_conf->root = NULL;
84         TAILQ_INIT(&tm_conf->queue_list);
85         TAILQ_INIT(&tm_conf->tc_list);
86         tm_conf->nb_tc_node = 0;
87         tm_conf->nb_queue_node = 0;
88         tm_conf->committed = false;
89 }
90
91 void
92 ixgbe_tm_conf_uninit(struct rte_eth_dev *dev)
93 {
94         struct ixgbe_tm_conf *tm_conf =
95                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
96         struct ixgbe_tm_shaper_profile *shaper_profile;
97         struct ixgbe_tm_node *tm_node;
98
99         /* clear node configuration */
100         while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) {
101                 TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
102                 rte_free(tm_node);
103         }
104         tm_conf->nb_queue_node = 0;
105         while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) {
106                 TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
107                 rte_free(tm_node);
108         }
109         tm_conf->nb_tc_node = 0;
110         if (tm_conf->root) {
111                 rte_free(tm_conf->root);
112                 tm_conf->root = NULL;
113         }
114
115         /* Remove all shaper profiles */
116         while ((shaper_profile =
117                TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
118                 TAILQ_REMOVE(&tm_conf->shaper_profile_list,
119                              shaper_profile, node);
120                 rte_free(shaper_profile);
121         }
122 }
123
124 static inline uint8_t
125 ixgbe_tc_nb_get(struct rte_eth_dev *dev)
126 {
127         struct rte_eth_conf *eth_conf;
128         uint8_t nb_tcs = 0;
129
130         eth_conf = &dev->data->dev_conf;
131         if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
132                 nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
133         } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
134                 if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
135                     ETH_32_POOLS)
136                         nb_tcs = ETH_4_TCS;
137                 else
138                         nb_tcs = ETH_8_TCS;
139         } else {
140                 nb_tcs = 1;
141         }
142
143         return nb_tcs;
144 }
145
146 static int
147 ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
148                           struct rte_tm_capabilities *cap,
149                           struct rte_tm_error *error)
150 {
151         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
152         uint8_t tc_nb = ixgbe_tc_nb_get(dev);
153
154         if (!cap || !error)
155                 return -EINVAL;
156
157         if (tc_nb > hw->mac.max_tx_queues)
158                 return -EINVAL;
159
160         error->type = RTE_TM_ERROR_TYPE_NONE;
161
162         /* set all the parameters to 0 first. */
163         memset(cap, 0, sizeof(struct rte_tm_capabilities));
164
165         /**
166          * here is the max capability not the current configuration.
167          */
168         /* port + TCs + queues */
169         cap->n_nodes_max = 1 + IXGBE_DCB_MAX_TRAFFIC_CLASS +
170                            hw->mac.max_tx_queues;
171         cap->n_levels_max = 3;
172         cap->non_leaf_nodes_identical = 1;
173         cap->leaf_nodes_identical = 1;
174         cap->shaper_n_max = cap->n_nodes_max;
175         cap->shaper_private_n_max = cap->n_nodes_max;
176         cap->shaper_private_dual_rate_n_max = 0;
177         cap->shaper_private_rate_min = 0;
178         /* 10Gbps -> 1.25GBps */
179         cap->shaper_private_rate_max = 1250000000ull;
180         cap->shaper_shared_n_max = 0;
181         cap->shaper_shared_n_nodes_per_shaper_max = 0;
182         cap->shaper_shared_n_shapers_per_node_max = 0;
183         cap->shaper_shared_dual_rate_n_max = 0;
184         cap->shaper_shared_rate_min = 0;
185         cap->shaper_shared_rate_max = 0;
186         cap->sched_n_children_max = hw->mac.max_tx_queues;
187         /**
188          * HW supports SP. But no plan to support it now.
189          * So, all the nodes should have the same priority.
190          */
191         cap->sched_sp_n_priorities_max = 1;
192         cap->sched_wfq_n_children_per_group_max = 0;
193         cap->sched_wfq_n_groups_max = 0;
194         /**
195          * SW only supports fair round robin now.
196          * So, all the nodes should have the same weight.
197          */
198         cap->sched_wfq_weight_max = 1;
199         cap->cman_head_drop_supported = 0;
200         cap->dynamic_update_mask = 0;
201         cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
202         cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
203         cap->cman_wred_context_n_max = 0;
204         cap->cman_wred_context_private_n_max = 0;
205         cap->cman_wred_context_shared_n_max = 0;
206         cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
207         cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
208         cap->stats_mask = 0;
209
210         return 0;
211 }
212
213 static inline struct ixgbe_tm_shaper_profile *
214 ixgbe_shaper_profile_search(struct rte_eth_dev *dev,
215                             uint32_t shaper_profile_id)
216 {
217         struct ixgbe_tm_conf *tm_conf =
218                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
219         struct ixgbe_shaper_profile_list *shaper_profile_list =
220                 &tm_conf->shaper_profile_list;
221         struct ixgbe_tm_shaper_profile *shaper_profile;
222
223         TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
224                 if (shaper_profile_id == shaper_profile->shaper_profile_id)
225                         return shaper_profile;
226         }
227
228         return NULL;
229 }
230
231 static int
232 ixgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
233                                  struct rte_tm_error *error)
234 {
235         /* min rate not supported */
236         if (profile->committed.rate) {
237                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
238                 error->message = "committed rate not supported";
239                 return -EINVAL;
240         }
241         /* min bucket size not supported */
242         if (profile->committed.size) {
243                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
244                 error->message = "committed bucket size not supported";
245                 return -EINVAL;
246         }
247         /* max bucket size not supported */
248         if (profile->peak.size) {
249                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
250                 error->message = "peak bucket size not supported";
251                 return -EINVAL;
252         }
253         /* length adjustment not supported */
254         if (profile->pkt_length_adjust) {
255                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
256                 error->message = "packet length adjustment not supported";
257                 return -EINVAL;
258         }
259
260         return 0;
261 }
262
263 static int
264 ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
265                          uint32_t shaper_profile_id,
266                          struct rte_tm_shaper_params *profile,
267                          struct rte_tm_error *error)
268 {
269         struct ixgbe_tm_conf *tm_conf =
270                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
271         struct ixgbe_tm_shaper_profile *shaper_profile;
272         int ret;
273
274         if (!profile || !error)
275                 return -EINVAL;
276
277         ret = ixgbe_shaper_profile_param_check(profile, error);
278         if (ret)
279                 return ret;
280
281         shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);
282
283         if (shaper_profile) {
284                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
285                 error->message = "profile ID exist";
286                 return -EINVAL;
287         }
288
289         shaper_profile = rte_zmalloc("ixgbe_tm_shaper_profile",
290                                      sizeof(struct ixgbe_tm_shaper_profile),
291                                      0);
292         if (!shaper_profile)
293                 return -ENOMEM;
294         shaper_profile->shaper_profile_id = shaper_profile_id;
295         (void)rte_memcpy(&shaper_profile->profile, profile,
296                          sizeof(struct rte_tm_shaper_params));
297         TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
298                           shaper_profile, node);
299
300         return 0;
301 }
302
303 static int
304 ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
305                          uint32_t shaper_profile_id,
306                          struct rte_tm_error *error)
307 {
308         struct ixgbe_tm_conf *tm_conf =
309                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
310         struct ixgbe_tm_shaper_profile *shaper_profile;
311
312         if (!error)
313                 return -EINVAL;
314
315         shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);
316
317         if (!shaper_profile) {
318                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
319                 error->message = "profile ID not exist";
320                 return -EINVAL;
321         }
322
323         /* don't delete a profile if it's used by one or several nodes */
324         if (shaper_profile->reference_count) {
325                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
326                 error->message = "profile in use";
327                 return -EINVAL;
328         }
329
330         TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node);
331         rte_free(shaper_profile);
332
333         return 0;
334 }
335
336 static inline struct ixgbe_tm_node *
337 ixgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
338                      enum ixgbe_tm_node_type *node_type)
339 {
340         struct ixgbe_tm_conf *tm_conf =
341                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
342         struct ixgbe_tm_node *tm_node;
343
344         if (tm_conf->root && tm_conf->root->id == node_id) {
345                 *node_type = IXGBE_TM_NODE_TYPE_PORT;
346                 return tm_conf->root;
347         }
348
349         TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
350                 if (tm_node->id == node_id) {
351                         *node_type = IXGBE_TM_NODE_TYPE_TC;
352                         return tm_node;
353                 }
354         }
355
356         TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
357                 if (tm_node->id == node_id) {
358                         *node_type = IXGBE_TM_NODE_TYPE_QUEUE;
359                         return tm_node;
360                 }
361         }
362
363         return NULL;
364 }
365
366 static void
367 ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
368                         uint16_t *base, uint16_t *nb)
369 {
370         uint8_t nb_tcs = ixgbe_tc_nb_get(dev);
371         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
372         uint16_t vf_num = pci_dev->max_vfs;
373
374         *base = 0;
375         *nb = 0;
376
377         /* VT on */
378         if (vf_num) {
379                 /* no DCB */
380                 if (nb_tcs == 1) {
381                         if (vf_num >= ETH_32_POOLS) {
382                                 *nb = 2;
383                                 *base = vf_num * 2;
384                         } else if (vf_num >= ETH_16_POOLS) {
385                                 *nb = 4;
386                                 *base = vf_num * 4;
387                         } else {
388                                 *nb = 8;
389                                 *base = vf_num * 8;
390                         }
391                 } else {
392                         /* DCB */
393                         *nb = 1;
394                         *base = vf_num * nb_tcs + tc_node_no;
395                 }
396         } else {
397                 /* VT off */
398                 if (nb_tcs == ETH_8_TCS) {
399                         switch (tc_node_no) {
400                         case 0:
401                                 *base = 0;
402                                 *nb = 32;
403                                 break;
404                         case 1:
405                                 *base = 32;
406                                 *nb = 32;
407                                 break;
408                         case 2:
409                                 *base = 64;
410                                 *nb = 16;
411                                 break;
412                         case 3:
413                                 *base = 80;
414                                 *nb = 16;
415                                 break;
416                         case 4:
417                                 *base = 96;
418                                 *nb = 8;
419                                 break;
420                         case 5:
421                                 *base = 104;
422                                 *nb = 8;
423                                 break;
424                         case 6:
425                                 *base = 112;
426                                 *nb = 8;
427                                 break;
428                         case 7:
429                                 *base = 120;
430                                 *nb = 8;
431                                 break;
432                         default:
433                                 return;
434                         }
435                 } else {
436                         switch (tc_node_no) {
437                         /**
438                          * If no VF and no DCB, only 64 queues can be used.
439                          * This case also be covered by this "case 0".
440                          */
441                         case 0:
442                                 *base = 0;
443                                 *nb = 64;
444                                 break;
445                         case 1:
446                                 *base = 64;
447                                 *nb = 32;
448                                 break;
449                         case 2:
450                                 *base = 96;
451                                 *nb = 16;
452                                 break;
453                         case 3:
454                                 *base = 112;
455                                 *nb = 16;
456                                 break;
457                         default:
458                                 return;
459                         }
460                 }
461         }
462 }
463
464 static int
465 ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id,
466                        uint32_t priority, uint32_t weight,
467                        struct rte_tm_node_params *params,
468                        struct rte_tm_error *error)
469 {
470         if (node_id == RTE_TM_NODE_ID_NULL) {
471                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
472                 error->message = "invalid node id";
473                 return -EINVAL;
474         }
475
476         if (priority) {
477                 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
478                 error->message = "priority should be 0";
479                 return -EINVAL;
480         }
481
482         if (weight != 1) {
483                 error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
484                 error->message = "weight must be 1";
485                 return -EINVAL;
486         }
487
488         /* not support shared shaper */
489         if (params->shared_shaper_id) {
490                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
491                 error->message = "shared shaper not supported";
492                 return -EINVAL;
493         }
494         if (params->n_shared_shapers) {
495                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
496                 error->message = "shared shaper not supported";
497                 return -EINVAL;
498         }
499
500         /* for root node */
501         if (parent_node_id == RTE_TM_NODE_ID_NULL) {
502                 /* check the unsupported parameters */
503                 if (params->nonleaf.wfq_weight_mode) {
504                         error->type =
505                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
506                         error->message = "WFQ not supported";
507                         return -EINVAL;
508                 }
509                 if (params->nonleaf.n_sp_priorities != 1) {
510                         error->type =
511                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
512                         error->message = "SP priority not supported";
513                         return -EINVAL;
514                 } else if (params->nonleaf.wfq_weight_mode &&
515                            !(*params->nonleaf.wfq_weight_mode)) {
516                         error->type =
517                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
518                         error->message = "WFP should be byte mode";
519                         return -EINVAL;
520                 }
521
522                 return 0;
523         }
524
525         /* for TC or queue node */
526         /* check the unsupported parameters */
527         if (params->leaf.cman) {
528                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
529                 error->message = "Congestion management not supported";
530                 return -EINVAL;
531         }
532         if (params->leaf.wred.wred_profile_id !=
533             RTE_TM_WRED_PROFILE_ID_NONE) {
534                 error->type =
535                         RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
536                 error->message = "WRED not supported";
537                 return -EINVAL;
538         }
539         if (params->leaf.wred.shared_wred_context_id) {
540                 error->type =
541                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
542                 error->message = "WRED not supported";
543                 return -EINVAL;
544         }
545         if (params->leaf.wred.n_shared_wred_contexts) {
546                 error->type =
547                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
548                 error->message = "WRED not supported";
549                 return -EINVAL;
550         }
551
552         return 0;
553 }
554
/**
 * Add a node (port, TC or queue level) to the TM hierarchy.
 *
 * Now the TC and queue configuration is controlled by DCB.
 * We need check if the node configuration follows the DCB configuration.
 * In the future, we may use TM to cover DCB.
 *
 * Level is inferred from the parent: no parent -> port (root), port
 * parent -> TC, TC parent -> queue. For queue nodes the node ID is the
 * TX queue ID. Nodes can only be added before the hierarchy is
 * committed. A valid shaper profile ID is required for every node.
 */
static int
ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	       uint32_t parent_node_id, uint32_t priority,
	       uint32_t weight, uint32_t level_id,
	       struct rte_tm_node_params *params,
	       struct rte_tm_error *error)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
	enum ixgbe_tm_node_type parent_node_type = IXGBE_TM_NODE_TYPE_MAX;
	struct ixgbe_tm_shaper_profile *shaper_profile;
	struct ixgbe_tm_node *tm_node;
	struct ixgbe_tm_node *parent_node;
	uint8_t nb_tcs;
	uint16_t q_base = 0;
	uint16_t q_nb = 0;
	int ret;

	if (!params || !error)
		return -EINVAL;

	/* if already committed */
	if (tm_conf->committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	ret = ixgbe_node_param_check(node_id, parent_node_id, priority, weight,
				     params, error);
	if (ret)
		return ret;

	/* check if the node ID is already used */
	if (ixgbe_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	/*
	 * check the shaper profile id.
	 * NOTE(review): a profile is mandatory here — there is no handling
	 * of RTE_TM_SHAPER_PROFILE_ID_NONE for unshaped nodes; confirm this
	 * is the intended restriction.
	 */
	shaper_profile = ixgbe_shaper_profile_search(dev,
						     params->shaper_profile_id);
	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
		error->message = "shaper profile not exist";
		return -EINVAL;
	}

	/* root node if not have a parent */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check level: any, or the port level number, is accepted */
		if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
		    level_id > IXGBE_TM_NODE_TYPE_PORT) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "Wrong level";
			return -EINVAL;
		}

		/* obviously no more than one root */
		if (tm_conf->root) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "already have a root";
			return -EINVAL;
		}

		/* add the root node */
		tm_node = rte_zmalloc("ixgbe_tm_node",
				      sizeof(struct ixgbe_tm_node),
				      0);
		if (!tm_node)
			return -ENOMEM;
		tm_node->id = node_id;
		tm_node->priority = priority;
		tm_node->weight = weight;
		tm_node->reference_count = 0;
		tm_node->no = 0;
		tm_node->parent = NULL;
		tm_node->shaper_profile = shaper_profile;
		(void)rte_memcpy(&tm_node->params, params,
				 sizeof(struct rte_tm_node_params));
		tm_conf->root = tm_node;

		/* increase the reference counter of the shaper profile */
		shaper_profile->reference_count++;

		return 0;
	}

	/* TC or queue node */
	/* check the parent node */
	parent_node = ixgbe_tm_node_search(dev, parent_node_id,
					   &parent_node_type);
	if (!parent_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent not exist";
		return -EINVAL;
	}
	if (parent_node_type != IXGBE_TM_NODE_TYPE_PORT &&
	    parent_node_type != IXGBE_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}
	/* check level: must be exactly one below the parent's level */
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != parent_node_type + 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "Wrong level";
		return -EINVAL;
	}

	/* check the node number */
	if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
		/* check TC number against the configured DCB TC count */
		nb_tcs = ixgbe_tc_nb_get(dev);
		if (tm_conf->nb_tc_node >= nb_tcs) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many TCs";
			return -EINVAL;
		}
	} else {
		/* check queue number against the configured TX queue count */
		if (tm_conf->nb_queue_node >= dev->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues";
			return -EINVAL;
		}

		/* the parent TC can only hold the queues DCB assigns to it */
		ixgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb);
		if (parent_node->reference_count >= q_nb) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues than TC supported";
			return -EINVAL;
		}

		/**
		 * check the node id.
		 * For queue, the node id means queue id.
		 */
		if (node_id >= dev->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too large queue id";
			return -EINVAL;
		}
	}

	/* add the TC or queue node */
	tm_node = rte_zmalloc("ixgbe_tm_node",
			      sizeof(struct ixgbe_tm_node),
			      0);
	if (!tm_node)
		return -ENOMEM;
	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	tm_node->shaper_profile = shaper_profile;
	(void)rte_memcpy(&tm_node->params, params,
			 sizeof(struct rte_tm_node_params));
	if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
		/* TC node: "no" is its ordinal among the port's children */
		tm_node->no = parent_node->reference_count;
		TAILQ_INSERT_TAIL(&tm_conf->tc_list,
				  tm_node, node);
		tm_conf->nb_tc_node++;
	} else {
		/* queue node: "no" is the absolute queue index in the TC */
		tm_node->no = q_base + parent_node->reference_count;
		TAILQ_INSERT_TAIL(&tm_conf->queue_list,
				  tm_node, node);
		tm_conf->nb_queue_node++;
	}
	tm_node->parent->reference_count++;

	/* increase the reference counter of the shaper profile */
	shaper_profile->reference_count++;

	return 0;
}