0e1ccf02850b5d48a81caa0d36e2f9509f8a5861
[dpdk.git] / drivers / net / ixgbe / ixgbe_tm.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <rte_malloc.h>
35
36 #include "ixgbe_ethdev.h"
37
38 static int ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
39                                      struct rte_tm_capabilities *cap,
40                                      struct rte_tm_error *error);
41 static int ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
42                                     uint32_t shaper_profile_id,
43                                     struct rte_tm_shaper_params *profile,
44                                     struct rte_tm_error *error);
45 static int ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
46                                     uint32_t shaper_profile_id,
47                                     struct rte_tm_error *error);
48 static int ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
49                           uint32_t parent_node_id, uint32_t priority,
50                           uint32_t weight, uint32_t level_id,
51                           struct rte_tm_node_params *params,
52                           struct rte_tm_error *error);
53 static int ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
54                              struct rte_tm_error *error);
55 static int ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
56                                int *is_leaf, struct rte_tm_error *error);
57
/* Traffic-management callback table exposed through the rte_tm API;
 * handed to applications by ixgbe_tm_ops_get() below. Callbacks not
 * listed here (node/level capability queries, stats, commit, ...) are
 * left NULL, i.e. unsupported.
 */
const struct rte_tm_ops ixgbe_tm_ops = {
	.capabilities_get = ixgbe_tm_capabilities_get,
	.shaper_profile_add = ixgbe_shaper_profile_add,
	.shaper_profile_delete = ixgbe_shaper_profile_del,
	.node_add = ixgbe_node_add,
	.node_delete = ixgbe_node_delete,
	.node_type_get = ixgbe_node_type_get,
};
66
67 int
68 ixgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
69                  void *arg)
70 {
71         if (!arg)
72                 return -EINVAL;
73
74         *(const void **)arg = &ixgbe_tm_ops;
75
76         return 0;
77 }
78
79 void
80 ixgbe_tm_conf_init(struct rte_eth_dev *dev)
81 {
82         struct ixgbe_tm_conf *tm_conf =
83                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
84
85         /* initialize shaper profile list */
86         TAILQ_INIT(&tm_conf->shaper_profile_list);
87
88         /* initialize node configuration */
89         tm_conf->root = NULL;
90         TAILQ_INIT(&tm_conf->queue_list);
91         TAILQ_INIT(&tm_conf->tc_list);
92         tm_conf->nb_tc_node = 0;
93         tm_conf->nb_queue_node = 0;
94         tm_conf->committed = false;
95 }
96
97 void
98 ixgbe_tm_conf_uninit(struct rte_eth_dev *dev)
99 {
100         struct ixgbe_tm_conf *tm_conf =
101                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
102         struct ixgbe_tm_shaper_profile *shaper_profile;
103         struct ixgbe_tm_node *tm_node;
104
105         /* clear node configuration */
106         while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) {
107                 TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
108                 rte_free(tm_node);
109         }
110         tm_conf->nb_queue_node = 0;
111         while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) {
112                 TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
113                 rte_free(tm_node);
114         }
115         tm_conf->nb_tc_node = 0;
116         if (tm_conf->root) {
117                 rte_free(tm_conf->root);
118                 tm_conf->root = NULL;
119         }
120
121         /* Remove all shaper profiles */
122         while ((shaper_profile =
123                TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
124                 TAILQ_REMOVE(&tm_conf->shaper_profile_list,
125                              shaper_profile, node);
126                 rte_free(shaper_profile);
127         }
128 }
129
130 static inline uint8_t
131 ixgbe_tc_nb_get(struct rte_eth_dev *dev)
132 {
133         struct rte_eth_conf *eth_conf;
134         uint8_t nb_tcs = 0;
135
136         eth_conf = &dev->data->dev_conf;
137         if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
138                 nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
139         } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
140                 if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
141                     ETH_32_POOLS)
142                         nb_tcs = ETH_4_TCS;
143                 else
144                         nb_tcs = ETH_8_TCS;
145         } else {
146                 nb_tcs = 1;
147         }
148
149         return nb_tcs;
150 }
151
/**
 * rte_tm capabilities_get callback.
 *
 * Fills @cap with the port's *maximum* TM capabilities (not the
 * currently configured values). Returns -EINVAL if @cap or @error is
 * NULL, or if the TC count exceeds the MAC's TX queue count.
 */
static int
ixgbe_tm_capabilities_get(struct rte_eth_dev *dev,
			  struct rte_tm_capabilities *cap,
			  struct rte_tm_error *error)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint8_t tc_nb = ixgbe_tc_nb_get(dev);

	if (!cap || !error)
		return -EINVAL;

	if (tc_nb > hw->mac.max_tx_queues)
		return -EINVAL;

	error->type = RTE_TM_ERROR_TYPE_NONE;

	/* set all the parameters to 0 first. */
	memset(cap, 0, sizeof(struct rte_tm_capabilities));

	/**
	 * here is the max capability not the current configuration.
	 */
	/* three-level tree: port + TCs + queues */
	cap->n_nodes_max = 1 + IXGBE_DCB_MAX_TRAFFIC_CLASS +
			   hw->mac.max_tx_queues;
	cap->n_levels_max = 3;
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;
	/* every node may have its own private shaper, none shared */
	cap->shaper_n_max = cap->n_nodes_max;
	cap->shaper_private_n_max = cap->n_nodes_max;
	cap->shaper_private_dual_rate_n_max = 0;
	cap->shaper_private_rate_min = 0;
	/* 10Gbps -> 1.25GBps (rates are in bytes per second) */
	cap->shaper_private_rate_max = 1250000000ull;
	cap->shaper_shared_n_max = 0;
	cap->shaper_shared_n_nodes_per_shaper_max = 0;
	cap->shaper_shared_n_shapers_per_node_max = 0;
	cap->shaper_shared_dual_rate_n_max = 0;
	cap->shaper_shared_rate_min = 0;
	cap->shaper_shared_rate_max = 0;
	cap->sched_n_children_max = hw->mac.max_tx_queues;
	/**
	 * HW supports SP. But no plan to support it now.
	 * So, all the nodes should have the same priority.
	 */
	cap->sched_sp_n_priorities_max = 1;
	cap->sched_wfq_n_children_per_group_max = 0;
	cap->sched_wfq_n_groups_max = 0;
	/**
	 * SW only supports fair round robin now.
	 * So, all the nodes should have the same weight.
	 */
	cap->sched_wfq_weight_max = 1;
	/* no congestion management / WRED, no dynamic updates, no stats */
	cap->cman_head_drop_supported = 0;
	cap->dynamic_update_mask = 0;
	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	cap->cman_wred_context_n_max = 0;
	cap->cman_wred_context_private_n_max = 0;
	cap->cman_wred_context_shared_n_max = 0;
	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
	cap->stats_mask = 0;

	return 0;
}
218
219 static inline struct ixgbe_tm_shaper_profile *
220 ixgbe_shaper_profile_search(struct rte_eth_dev *dev,
221                             uint32_t shaper_profile_id)
222 {
223         struct ixgbe_tm_conf *tm_conf =
224                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
225         struct ixgbe_shaper_profile_list *shaper_profile_list =
226                 &tm_conf->shaper_profile_list;
227         struct ixgbe_tm_shaper_profile *shaper_profile;
228
229         TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
230                 if (shaper_profile_id == shaper_profile->shaper_profile_id)
231                         return shaper_profile;
232         }
233
234         return NULL;
235 }
236
237 static int
238 ixgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
239                                  struct rte_tm_error *error)
240 {
241         /* min rate not supported */
242         if (profile->committed.rate) {
243                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
244                 error->message = "committed rate not supported";
245                 return -EINVAL;
246         }
247         /* min bucket size not supported */
248         if (profile->committed.size) {
249                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
250                 error->message = "committed bucket size not supported";
251                 return -EINVAL;
252         }
253         /* max bucket size not supported */
254         if (profile->peak.size) {
255                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
256                 error->message = "peak bucket size not supported";
257                 return -EINVAL;
258         }
259         /* length adjustment not supported */
260         if (profile->pkt_length_adjust) {
261                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
262                 error->message = "packet length adjustment not supported";
263                 return -EINVAL;
264         }
265
266         return 0;
267 }
268
269 static int
270 ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
271                          uint32_t shaper_profile_id,
272                          struct rte_tm_shaper_params *profile,
273                          struct rte_tm_error *error)
274 {
275         struct ixgbe_tm_conf *tm_conf =
276                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
277         struct ixgbe_tm_shaper_profile *shaper_profile;
278         int ret;
279
280         if (!profile || !error)
281                 return -EINVAL;
282
283         ret = ixgbe_shaper_profile_param_check(profile, error);
284         if (ret)
285                 return ret;
286
287         shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);
288
289         if (shaper_profile) {
290                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
291                 error->message = "profile ID exist";
292                 return -EINVAL;
293         }
294
295         shaper_profile = rte_zmalloc("ixgbe_tm_shaper_profile",
296                                      sizeof(struct ixgbe_tm_shaper_profile),
297                                      0);
298         if (!shaper_profile)
299                 return -ENOMEM;
300         shaper_profile->shaper_profile_id = shaper_profile_id;
301         (void)rte_memcpy(&shaper_profile->profile, profile,
302                          sizeof(struct rte_tm_shaper_params));
303         TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
304                           shaper_profile, node);
305
306         return 0;
307 }
308
309 static int
310 ixgbe_shaper_profile_del(struct rte_eth_dev *dev,
311                          uint32_t shaper_profile_id,
312                          struct rte_tm_error *error)
313 {
314         struct ixgbe_tm_conf *tm_conf =
315                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
316         struct ixgbe_tm_shaper_profile *shaper_profile;
317
318         if (!error)
319                 return -EINVAL;
320
321         shaper_profile = ixgbe_shaper_profile_search(dev, shaper_profile_id);
322
323         if (!shaper_profile) {
324                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
325                 error->message = "profile ID not exist";
326                 return -EINVAL;
327         }
328
329         /* don't delete a profile if it's used by one or several nodes */
330         if (shaper_profile->reference_count) {
331                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
332                 error->message = "profile in use";
333                 return -EINVAL;
334         }
335
336         TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node);
337         rte_free(shaper_profile);
338
339         return 0;
340 }
341
342 static inline struct ixgbe_tm_node *
343 ixgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
344                      enum ixgbe_tm_node_type *node_type)
345 {
346         struct ixgbe_tm_conf *tm_conf =
347                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
348         struct ixgbe_tm_node *tm_node;
349
350         if (tm_conf->root && tm_conf->root->id == node_id) {
351                 *node_type = IXGBE_TM_NODE_TYPE_PORT;
352                 return tm_conf->root;
353         }
354
355         TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
356                 if (tm_node->id == node_id) {
357                         *node_type = IXGBE_TM_NODE_TYPE_TC;
358                         return tm_node;
359                 }
360         }
361
362         TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
363                 if (tm_node->id == node_id) {
364                         *node_type = IXGBE_TM_NODE_TYPE_QUEUE;
365                         return tm_node;
366                 }
367         }
368
369         return NULL;
370 }
371
/**
 * Map a TC node number to the TX queue range it owns (first queue in
 * *base, queue count in *nb), for the current VMDq/DCB configuration.
 *
 * Outputs (0, 0) when tc_node_no is out of range for the active mode.
 * The fixed tables below presumably mirror the ixgbe HW queue layout
 * for 8-TC and 4-TC DCB — confirm against the datasheet before change.
 */
static void
ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
			uint16_t *base, uint16_t *nb)
{
	uint8_t nb_tcs = ixgbe_tc_nb_get(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t vf_num = pci_dev->max_vfs;

	*base = 0;
	*nb = 0;

	/* VT on */
	if (vf_num) {
		/* no DCB */
		if (nb_tcs == 1) {
			/* queues per pool depend on the pool count;
			 * the PF range starts right after the VF pools
			 */
			if (vf_num >= ETH_32_POOLS) {
				*nb = 2;
				*base = vf_num * 2;
			} else if (vf_num >= ETH_16_POOLS) {
				*nb = 4;
				*base = vf_num * 4;
			} else {
				*nb = 8;
				*base = vf_num * 8;
			}
		} else {
			/* DCB: one queue per TC inside the PF's pool */
			*nb = 1;
			*base = vf_num * nb_tcs + tc_node_no;
		}
	} else {
		/* VT off */
		if (nb_tcs == ETH_8_TCS) {
			/* 8 TCs: 32/32/16/16/8/8/8/8 queues */
			switch (tc_node_no) {
			case 0:
				*base = 0;
				*nb = 32;
				break;
			case 1:
				*base = 32;
				*nb = 32;
				break;
			case 2:
				*base = 64;
				*nb = 16;
				break;
			case 3:
				*base = 80;
				*nb = 16;
				break;
			case 4:
				*base = 96;
				*nb = 8;
				break;
			case 5:
				*base = 104;
				*nb = 8;
				break;
			case 6:
				*base = 112;
				*nb = 8;
				break;
			case 7:
				*base = 120;
				*nb = 8;
				break;
			default:
				return;
			}
		} else {
			/* 4 TCs (or fewer): 64/32/16/16 queues */
			switch (tc_node_no) {
			/**
			 * If no VF and no DCB, only 64 queues can be used.
			 * This case also be covered by this "case 0".
			 */
			case 0:
				*base = 0;
				*nb = 64;
				break;
			case 1:
				*base = 64;
				*nb = 32;
				break;
			case 2:
				*base = 96;
				*nb = 16;
				break;
			case 3:
				*base = 112;
				*nb = 16;
				break;
			default:
				return;
			}
		}
	}
}
469
470 static int
471 ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id,
472                        uint32_t priority, uint32_t weight,
473                        struct rte_tm_node_params *params,
474                        struct rte_tm_error *error)
475 {
476         if (node_id == RTE_TM_NODE_ID_NULL) {
477                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
478                 error->message = "invalid node id";
479                 return -EINVAL;
480         }
481
482         if (priority) {
483                 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
484                 error->message = "priority should be 0";
485                 return -EINVAL;
486         }
487
488         if (weight != 1) {
489                 error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
490                 error->message = "weight must be 1";
491                 return -EINVAL;
492         }
493
494         /* not support shared shaper */
495         if (params->shared_shaper_id) {
496                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
497                 error->message = "shared shaper not supported";
498                 return -EINVAL;
499         }
500         if (params->n_shared_shapers) {
501                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
502                 error->message = "shared shaper not supported";
503                 return -EINVAL;
504         }
505
506         /* for root node */
507         if (parent_node_id == RTE_TM_NODE_ID_NULL) {
508                 /* check the unsupported parameters */
509                 if (params->nonleaf.wfq_weight_mode) {
510                         error->type =
511                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
512                         error->message = "WFQ not supported";
513                         return -EINVAL;
514                 }
515                 if (params->nonleaf.n_sp_priorities != 1) {
516                         error->type =
517                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
518                         error->message = "SP priority not supported";
519                         return -EINVAL;
520                 } else if (params->nonleaf.wfq_weight_mode &&
521                            !(*params->nonleaf.wfq_weight_mode)) {
522                         error->type =
523                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
524                         error->message = "WFP should be byte mode";
525                         return -EINVAL;
526                 }
527
528                 return 0;
529         }
530
531         /* for TC or queue node */
532         /* check the unsupported parameters */
533         if (params->leaf.cman) {
534                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
535                 error->message = "Congestion management not supported";
536                 return -EINVAL;
537         }
538         if (params->leaf.wred.wred_profile_id !=
539             RTE_TM_WRED_PROFILE_ID_NONE) {
540                 error->type =
541                         RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
542                 error->message = "WRED not supported";
543                 return -EINVAL;
544         }
545         if (params->leaf.wred.shared_wred_context_id) {
546                 error->type =
547                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
548                 error->message = "WRED not supported";
549                 return -EINVAL;
550         }
551         if (params->leaf.wred.n_shared_wred_contexts) {
552                 error->type =
553                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
554                 error->message = "WRED not supported";
555                 return -EINVAL;
556         }
557
558         return 0;
559 }
560
/**
 * rte_tm node_add callback.
 *
 * Now the TC and queue configuration is controlled by DCB.
 * We need check if the node configuration follows the DCB configuration.
 * In the future, we may use TM to cover DCB.
 *
 * Three levels exist: port (root, no parent), TC (child of port) and
 * queue (child of a TC; its node id is the queue id). Nodes can only
 * be added before the hierarchy is committed, and every node must
 * reference an existing shaper profile.
 */
static int
ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	       uint32_t parent_node_id, uint32_t priority,
	       uint32_t weight, uint32_t level_id,
	       struct rte_tm_node_params *params,
	       struct rte_tm_error *error)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
	enum ixgbe_tm_node_type parent_node_type = IXGBE_TM_NODE_TYPE_MAX;
	struct ixgbe_tm_shaper_profile *shaper_profile;
	struct ixgbe_tm_node *tm_node;
	struct ixgbe_tm_node *parent_node;
	uint8_t nb_tcs;
	uint16_t q_base = 0;
	uint16_t q_nb = 0;
	int ret;

	if (!params || !error)
		return -EINVAL;

	/* if already committed */
	if (tm_conf->committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	ret = ixgbe_node_param_check(node_id, parent_node_id, priority, weight,
				     params, error);
	if (ret)
		return ret;

	/* check if the node ID is already used */
	if (ixgbe_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	/* check the shaper profile id */
	shaper_profile = ixgbe_shaper_profile_search(dev,
						     params->shaper_profile_id);
	if (!shaper_profile) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
		error->message = "shaper profile not exist";
		return -EINVAL;
	}

	/* root node if not have a parent */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* check level: root must sit at the port level */
		if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
		    level_id > IXGBE_TM_NODE_TYPE_PORT) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "Wrong level";
			return -EINVAL;
		}

		/* obviously no more than one root */
		if (tm_conf->root) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "already have a root";
			return -EINVAL;
		}

		/* add the root node */
		tm_node = rte_zmalloc("ixgbe_tm_node",
				      sizeof(struct ixgbe_tm_node),
				      0);
		if (!tm_node)
			return -ENOMEM;
		tm_node->id = node_id;
		tm_node->priority = priority;
		tm_node->weight = weight;
		tm_node->reference_count = 0;
		tm_node->no = 0;
		tm_node->parent = NULL;
		tm_node->shaper_profile = shaper_profile;
		(void)rte_memcpy(&tm_node->params, params,
				 sizeof(struct rte_tm_node_params));
		tm_conf->root = tm_node;

		/* increase the reference counter of the shaper profile */
		shaper_profile->reference_count++;

		return 0;
	}

	/* TC or queue node */
	/* check the parent node */
	parent_node = ixgbe_tm_node_search(dev, parent_node_id,
					   &parent_node_type);
	if (!parent_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent not exist";
		return -EINVAL;
	}
	if (parent_node_type != IXGBE_TM_NODE_TYPE_PORT &&
	    parent_node_type != IXGBE_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}
	/* check level: child must be exactly one level below its parent */
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != parent_node_type + 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "Wrong level";
		return -EINVAL;
	}

	/* check the node number */
	if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
		/* check TC number against the current DCB config */
		nb_tcs = ixgbe_tc_nb_get(dev);
		if (tm_conf->nb_tc_node >= nb_tcs) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many TCs";
			return -EINVAL;
		}
	} else {
		/* check queue number */
		if (tm_conf->nb_queue_node >= dev->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues";
			return -EINVAL;
		}

		/* the parent TC's HW queue range caps its child count */
		ixgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb);
		if (parent_node->reference_count >= q_nb) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues than TC supported";
			return -EINVAL;
		}

		/**
		 * check the node id.
		 * For queue, the node id means queue id.
		 */
		if (node_id >= dev->data->nb_tx_queues) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too large queue id";
			return -EINVAL;
		}
	}

	/* add the TC or queue node */
	tm_node = rte_zmalloc("ixgbe_tm_node",
			      sizeof(struct ixgbe_tm_node),
			      0);
	if (!tm_node)
		return -ENOMEM;
	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	tm_node->shaper_profile = shaper_profile;
	(void)rte_memcpy(&tm_node->params, params,
			 sizeof(struct rte_tm_node_params));
	if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
		/* 'no' is the TC index: order of addition under the port */
		tm_node->no = parent_node->reference_count;
		TAILQ_INSERT_TAIL(&tm_conf->tc_list,
				  tm_node, node);
		tm_conf->nb_tc_node++;
	} else {
		/* 'no' is the absolute HW queue index within the TC range */
		tm_node->no = q_base + parent_node->reference_count;
		TAILQ_INSERT_TAIL(&tm_conf->queue_list,
				  tm_node, node);
		tm_conf->nb_queue_node++;
	}
	/* parent's reference_count doubles as its child count */
	tm_node->parent->reference_count++;

	/* increase the reference counter of the shaper profile */
	shaper_profile->reference_count++;

	return 0;
}
746
/**
 * rte_tm node_delete callback.
 *
 * A node can only be deleted before the hierarchy is committed and
 * while it has no children (reference_count == 0). Deleting releases
 * the node's reference on its shaper profile and, for non-root nodes,
 * on its parent.
 */
static int
ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		  struct rte_tm_error *error)
{
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
	enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
	struct ixgbe_tm_node *tm_node;

	if (!error)
		return -EINVAL;

	/* if already committed */
	if (tm_conf->committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists */
	tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	/* the node should have no child */
	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message =
			"cannot delete a node which has children";
		return -EINVAL;
	}

	/* root node */
	if (node_type == IXGBE_TM_NODE_TYPE_PORT) {
		tm_node->shaper_profile->reference_count--;
		rte_free(tm_node);
		tm_conf->root = NULL;
		return 0;
	}

	/* TC or queue node: drop profile and parent references first */
	tm_node->shaper_profile->reference_count--;
	tm_node->parent->reference_count--;
	if (node_type == IXGBE_TM_NODE_TYPE_TC) {
		TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
		tm_conf->nb_tc_node--;
	} else {
		TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
		tm_conf->nb_queue_node--;
	}
	rte_free(tm_node);

	return 0;
}
810
811 static int
812 ixgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
813                     int *is_leaf, struct rte_tm_error *error)
814 {
815         enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
816         struct ixgbe_tm_node *tm_node;
817
818         if (!is_leaf || !error)
819                 return -EINVAL;
820
821         if (node_id == RTE_TM_NODE_ID_NULL) {
822                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
823                 error->message = "invalid node id";
824                 return -EINVAL;
825         }
826
827         /* check if the node id exists */
828         tm_node = ixgbe_tm_node_search(dev, node_id, &node_type);
829         if (!tm_node) {
830                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
831                 error->message = "no such node";
832                 return -EINVAL;
833         }
834
835         if (node_type == IXGBE_TM_NODE_TYPE_QUEUE)
836                 *is_leaf = true;
837         else
838                 *is_leaf = false;
839
840         return 0;
841 }