6dd593e54e075d8cacedc9ac0a7b9affc0c9b041
[dpdk.git] / drivers / net / txgbe / txgbe_tm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020
3  */
4
5 #include <rte_malloc.h>
6
7 #include "txgbe_ethdev.h"
8
/* Forward declarations for the rte_tm_ops callbacks implemented below. */
static int txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
                                     struct rte_tm_capabilities *cap,
                                     struct rte_tm_error *error);
static int txgbe_shaper_profile_add(struct rte_eth_dev *dev,
                                    uint32_t shaper_profile_id,
                                    struct rte_tm_shaper_params *profile,
                                    struct rte_tm_error *error);
static int txgbe_shaper_profile_del(struct rte_eth_dev *dev,
                                    uint32_t shaper_profile_id,
                                    struct rte_tm_error *error);
static int txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
                          uint32_t parent_node_id, uint32_t priority,
                          uint32_t weight, uint32_t level_id,
                          struct rte_tm_node_params *params,
                          struct rte_tm_error *error);
static int txgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
                             struct rte_tm_error *error);
static int txgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
                               int *is_leaf, struct rte_tm_error *error);
static int txgbe_level_capabilities_get(struct rte_eth_dev *dev,
                                        uint32_t level_id,
                                        struct rte_tm_level_capabilities *cap,
                                        struct rte_tm_error *error);
static int txgbe_node_capabilities_get(struct rte_eth_dev *dev,
                                       uint32_t node_id,
                                       struct rte_tm_node_capabilities *cap,
                                       struct rte_tm_error *error);
36
37 const struct rte_tm_ops txgbe_tm_ops = {
38         .capabilities_get = txgbe_tm_capabilities_get,
39         .shaper_profile_add = txgbe_shaper_profile_add,
40         .shaper_profile_delete = txgbe_shaper_profile_del,
41         .node_add = txgbe_node_add,
42         .node_delete = txgbe_node_delete,
43         .node_type_get = txgbe_node_type_get,
44         .level_capabilities_get = txgbe_level_capabilities_get,
45         .node_capabilities_get = txgbe_node_capabilities_get,
46 };
47
48 int
49 txgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
50                  void *arg)
51 {
52         if (!arg)
53                 return -EINVAL;
54
55         *(const void **)arg = &txgbe_tm_ops;
56
57         return 0;
58 }
59
60 void
61 txgbe_tm_conf_init(struct rte_eth_dev *dev)
62 {
63         struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
64
65         /* initialize shaper profile list */
66         TAILQ_INIT(&tm_conf->shaper_profile_list);
67
68         /* initialize node configuration */
69         tm_conf->root = NULL;
70         TAILQ_INIT(&tm_conf->queue_list);
71         TAILQ_INIT(&tm_conf->tc_list);
72         tm_conf->nb_tc_node = 0;
73         tm_conf->nb_queue_node = 0;
74         tm_conf->committed = false;
75 }
76
77 void
78 txgbe_tm_conf_uninit(struct rte_eth_dev *dev)
79 {
80         struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
81         struct txgbe_tm_shaper_profile *shaper_profile;
82         struct txgbe_tm_node *tm_node;
83
84         /* clear node configuration */
85         while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) {
86                 TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
87                 rte_free(tm_node);
88         }
89         tm_conf->nb_queue_node = 0;
90         while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) {
91                 TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
92                 rte_free(tm_node);
93         }
94         tm_conf->nb_tc_node = 0;
95         if (tm_conf->root) {
96                 rte_free(tm_conf->root);
97                 tm_conf->root = NULL;
98         }
99
100         /* Remove all shaper profiles */
101         while ((shaper_profile =
102                TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
103                 TAILQ_REMOVE(&tm_conf->shaper_profile_list,
104                              shaper_profile, node);
105                 rte_free(shaper_profile);
106         }
107 }
108
109 static inline uint8_t
110 txgbe_tc_nb_get(struct rte_eth_dev *dev)
111 {
112         struct rte_eth_conf *eth_conf;
113         uint8_t nb_tcs = 0;
114
115         eth_conf = &dev->data->dev_conf;
116         if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
117                 nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
118         } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
119                 if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
120                     ETH_32_POOLS)
121                         nb_tcs = ETH_4_TCS;
122                 else
123                         nb_tcs = ETH_8_TCS;
124         } else {
125                 nb_tcs = 1;
126         }
127
128         return nb_tcs;
129 }
130
131 static int
132 txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
133                           struct rte_tm_capabilities *cap,
134                           struct rte_tm_error *error)
135 {
136         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
137         uint8_t tc_nb = txgbe_tc_nb_get(dev);
138
139         if (!cap || !error)
140                 return -EINVAL;
141
142         if (tc_nb > hw->mac.max_tx_queues)
143                 return -EINVAL;
144
145         error->type = RTE_TM_ERROR_TYPE_NONE;
146
147         /* set all the parameters to 0 first. */
148         memset(cap, 0, sizeof(struct rte_tm_capabilities));
149
150         /**
151          * here is the max capability not the current configuration.
152          */
153         /* port + TCs + queues */
154         cap->n_nodes_max = 1 + TXGBE_DCB_TC_MAX +
155                            hw->mac.max_tx_queues;
156         cap->n_levels_max = 3;
157         cap->non_leaf_nodes_identical = 1;
158         cap->leaf_nodes_identical = 1;
159         cap->shaper_n_max = cap->n_nodes_max;
160         cap->shaper_private_n_max = cap->n_nodes_max;
161         cap->shaper_private_dual_rate_n_max = 0;
162         cap->shaper_private_rate_min = 0;
163         /* 10Gbps -> 1.25GBps */
164         cap->shaper_private_rate_max = 1250000000ull;
165         cap->shaper_shared_n_max = 0;
166         cap->shaper_shared_n_nodes_per_shaper_max = 0;
167         cap->shaper_shared_n_shapers_per_node_max = 0;
168         cap->shaper_shared_dual_rate_n_max = 0;
169         cap->shaper_shared_rate_min = 0;
170         cap->shaper_shared_rate_max = 0;
171         cap->sched_n_children_max = hw->mac.max_tx_queues;
172         /**
173          * HW supports SP. But no plan to support it now.
174          * So, all the nodes should have the same priority.
175          */
176         cap->sched_sp_n_priorities_max = 1;
177         cap->sched_wfq_n_children_per_group_max = 0;
178         cap->sched_wfq_n_groups_max = 0;
179         /**
180          * SW only supports fair round robin now.
181          * So, all the nodes should have the same weight.
182          */
183         cap->sched_wfq_weight_max = 1;
184         cap->cman_head_drop_supported = 0;
185         cap->dynamic_update_mask = 0;
186         cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
187         cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
188         cap->cman_wred_context_n_max = 0;
189         cap->cman_wred_context_private_n_max = 0;
190         cap->cman_wred_context_shared_n_max = 0;
191         cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
192         cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
193         cap->stats_mask = 0;
194
195         return 0;
196 }
197
198 static inline struct txgbe_tm_shaper_profile *
199 txgbe_shaper_profile_search(struct rte_eth_dev *dev,
200                             uint32_t shaper_profile_id)
201 {
202         struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
203         struct txgbe_shaper_profile_list *shaper_profile_list =
204                 &tm_conf->shaper_profile_list;
205         struct txgbe_tm_shaper_profile *shaper_profile;
206
207         TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
208                 if (shaper_profile_id == shaper_profile->shaper_profile_id)
209                         return shaper_profile;
210         }
211
212         return NULL;
213 }
214
215 static int
216 txgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
217                                  struct rte_tm_error *error)
218 {
219         /* min rate not supported */
220         if (profile->committed.rate) {
221                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
222                 error->message = "committed rate not supported";
223                 return -EINVAL;
224         }
225         /* min bucket size not supported */
226         if (profile->committed.size) {
227                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
228                 error->message = "committed bucket size not supported";
229                 return -EINVAL;
230         }
231         /* max bucket size not supported */
232         if (profile->peak.size) {
233                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
234                 error->message = "peak bucket size not supported";
235                 return -EINVAL;
236         }
237         /* length adjustment not supported */
238         if (profile->pkt_length_adjust) {
239                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
240                 error->message = "packet length adjustment not supported";
241                 return -EINVAL;
242         }
243
244         return 0;
245 }
246
247 static int
248 txgbe_shaper_profile_add(struct rte_eth_dev *dev,
249                          uint32_t shaper_profile_id,
250                          struct rte_tm_shaper_params *profile,
251                          struct rte_tm_error *error)
252 {
253         struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
254         struct txgbe_tm_shaper_profile *shaper_profile;
255         int ret;
256
257         if (!profile || !error)
258                 return -EINVAL;
259
260         ret = txgbe_shaper_profile_param_check(profile, error);
261         if (ret)
262                 return ret;
263
264         shaper_profile = txgbe_shaper_profile_search(dev, shaper_profile_id);
265
266         if (shaper_profile) {
267                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
268                 error->message = "profile ID exist";
269                 return -EINVAL;
270         }
271
272         shaper_profile = rte_zmalloc("txgbe_tm_shaper_profile",
273                                      sizeof(struct txgbe_tm_shaper_profile),
274                                      0);
275         if (!shaper_profile)
276                 return -ENOMEM;
277         shaper_profile->shaper_profile_id = shaper_profile_id;
278         rte_memcpy(&shaper_profile->profile, profile,
279                          sizeof(struct rte_tm_shaper_params));
280         TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
281                           shaper_profile, node);
282
283         return 0;
284 }
285
286 static int
287 txgbe_shaper_profile_del(struct rte_eth_dev *dev,
288                          uint32_t shaper_profile_id,
289                          struct rte_tm_error *error)
290 {
291         struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
292         struct txgbe_tm_shaper_profile *shaper_profile;
293
294         if (!error)
295                 return -EINVAL;
296
297         shaper_profile = txgbe_shaper_profile_search(dev, shaper_profile_id);
298
299         if (!shaper_profile) {
300                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
301                 error->message = "profile ID not exist";
302                 return -EINVAL;
303         }
304
305         /* don't delete a profile if it's used by one or several nodes */
306         if (shaper_profile->reference_count) {
307                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
308                 error->message = "profile in use";
309                 return -EINVAL;
310         }
311
312         TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node);
313         rte_free(shaper_profile);
314
315         return 0;
316 }
317
318 static inline struct txgbe_tm_node *
319 txgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
320                      enum txgbe_tm_node_type *node_type)
321 {
322         struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
323         struct txgbe_tm_node *tm_node;
324
325         if (tm_conf->root && tm_conf->root->id == node_id) {
326                 *node_type = TXGBE_TM_NODE_TYPE_PORT;
327                 return tm_conf->root;
328         }
329
330         TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
331                 if (tm_node->id == node_id) {
332                         *node_type = TXGBE_TM_NODE_TYPE_TC;
333                         return tm_node;
334                 }
335         }
336
337         TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
338                 if (tm_node->id == node_id) {
339                         *node_type = TXGBE_TM_NODE_TYPE_QUEUE;
340                         return tm_node;
341                 }
342         }
343
344         return NULL;
345 }
346
347 static void
348 txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
349                         uint16_t *base, uint16_t *nb)
350 {
351         uint8_t nb_tcs = txgbe_tc_nb_get(dev);
352         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
353         uint16_t vf_num = pci_dev->max_vfs;
354
355         *base = 0;
356         *nb = 0;
357
358         /* VT on */
359         if (vf_num) {
360                 /* no DCB */
361                 if (nb_tcs == 1) {
362                         if (vf_num >= ETH_32_POOLS) {
363                                 *nb = 2;
364                                 *base = vf_num * 2;
365                         } else if (vf_num >= ETH_16_POOLS) {
366                                 *nb = 4;
367                                 *base = vf_num * 4;
368                         } else {
369                                 *nb = 8;
370                                 *base = vf_num * 8;
371                         }
372                 } else {
373                         /* DCB */
374                         *nb = 1;
375                         *base = vf_num * nb_tcs + tc_node_no;
376                 }
377         } else {
378                 /* VT off */
379                 if (nb_tcs == ETH_8_TCS) {
380                         switch (tc_node_no) {
381                         case 0:
382                                 *base = 0;
383                                 *nb = 32;
384                                 break;
385                         case 1:
386                                 *base = 32;
387                                 *nb = 32;
388                                 break;
389                         case 2:
390                                 *base = 64;
391                                 *nb = 16;
392                                 break;
393                         case 3:
394                                 *base = 80;
395                                 *nb = 16;
396                                 break;
397                         case 4:
398                                 *base = 96;
399                                 *nb = 8;
400                                 break;
401                         case 5:
402                                 *base = 104;
403                                 *nb = 8;
404                                 break;
405                         case 6:
406                                 *base = 112;
407                                 *nb = 8;
408                                 break;
409                         case 7:
410                                 *base = 120;
411                                 *nb = 8;
412                                 break;
413                         default:
414                                 return;
415                         }
416                 } else {
417                         switch (tc_node_no) {
418                         /**
419                          * If no VF and no DCB, only 64 queues can be used.
420                          * This case also be covered by this "case 0".
421                          */
422                         case 0:
423                                 *base = 0;
424                                 *nb = 64;
425                                 break;
426                         case 1:
427                                 *base = 64;
428                                 *nb = 32;
429                                 break;
430                         case 2:
431                                 *base = 96;
432                                 *nb = 16;
433                                 break;
434                         case 3:
435                                 *base = 112;
436                                 *nb = 16;
437                                 break;
438                         default:
439                                 return;
440                         }
441                 }
442         }
443 }
444
445 static int
446 txgbe_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
447                        uint32_t priority, uint32_t weight,
448                        struct rte_tm_node_params *params,
449                        struct rte_tm_error *error)
450 {
451         if (node_id == RTE_TM_NODE_ID_NULL) {
452                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
453                 error->message = "invalid node id";
454                 return -EINVAL;
455         }
456
457         if (priority) {
458                 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
459                 error->message = "priority should be 0";
460                 return -EINVAL;
461         }
462
463         if (weight != 1) {
464                 error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
465                 error->message = "weight must be 1";
466                 return -EINVAL;
467         }
468
469         /* not support shared shaper */
470         if (params->shared_shaper_id) {
471                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
472                 error->message = "shared shaper not supported";
473                 return -EINVAL;
474         }
475         if (params->n_shared_shapers) {
476                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
477                 error->message = "shared shaper not supported";
478                 return -EINVAL;
479         }
480
481         /* for non-leaf node */
482         if (node_id >= dev->data->nb_tx_queues) {
483                 /* check the unsupported parameters */
484                 if (params->nonleaf.wfq_weight_mode) {
485                         error->type =
486                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
487                         error->message = "WFQ not supported";
488                         return -EINVAL;
489                 }
490                 if (params->nonleaf.n_sp_priorities != 1) {
491                         error->type =
492                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
493                         error->message = "SP priority not supported";
494                         return -EINVAL;
495                 } else if (params->nonleaf.wfq_weight_mode &&
496                            !(*params->nonleaf.wfq_weight_mode)) {
497                         error->type =
498                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
499                         error->message = "WFP should be byte mode";
500                         return -EINVAL;
501                 }
502
503                 return 0;
504         }
505
506         /* for leaf node */
507         /* check the unsupported parameters */
508         if (params->leaf.cman) {
509                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
510                 error->message = "Congestion management not supported";
511                 return -EINVAL;
512         }
513         if (params->leaf.wred.wred_profile_id !=
514             RTE_TM_WRED_PROFILE_ID_NONE) {
515                 error->type =
516                         RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
517                 error->message = "WRED not supported";
518                 return -EINVAL;
519         }
520         if (params->leaf.wred.shared_wred_context_id) {
521                 error->type =
522                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
523                 error->message = "WRED not supported";
524                 return -EINVAL;
525         }
526         if (params->leaf.wred.n_shared_wred_contexts) {
527                 error->type =
528                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
529                 error->message = "WRED not supported";
530                 return -EINVAL;
531         }
532
533         return 0;
534 }
535
536 /**
537  * Now the TC and queue configuration is controlled by DCB.
538  * We need check if the node configuration follows the DCB configuration.
539  * In the future, we may use TM to cover DCB.
540  */
541 static int
542 txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
543                uint32_t parent_node_id, uint32_t priority,
544                uint32_t weight, uint32_t level_id,
545                struct rte_tm_node_params *params,
546                struct rte_tm_error *error)
547 {
548         struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
549         enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
550         enum txgbe_tm_node_type parent_node_type = TXGBE_TM_NODE_TYPE_MAX;
551         struct txgbe_tm_shaper_profile *shaper_profile = NULL;
552         struct txgbe_tm_node *tm_node;
553         struct txgbe_tm_node *parent_node;
554         uint8_t nb_tcs;
555         uint16_t q_base = 0;
556         uint16_t q_nb = 0;
557         int ret;
558
559         if (!params || !error)
560                 return -EINVAL;
561
562         /* if already committed */
563         if (tm_conf->committed) {
564                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
565                 error->message = "already committed";
566                 return -EINVAL;
567         }
568
569         ret = txgbe_node_param_check(dev, node_id, priority, weight,
570                                      params, error);
571         if (ret)
572                 return ret;
573
574         /* check if the node ID is already used */
575         if (txgbe_tm_node_search(dev, node_id, &node_type)) {
576                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
577                 error->message = "node id already used";
578                 return -EINVAL;
579         }
580
581         /* check the shaper profile id */
582         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
583                 shaper_profile = txgbe_shaper_profile_search(dev,
584                                         params->shaper_profile_id);
585                 if (!shaper_profile) {
586                         error->type =
587                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
588                         error->message = "shaper profile not exist";
589                         return -EINVAL;
590                 }
591         }
592
593         /* root node if not have a parent */
594         if (parent_node_id == RTE_TM_NODE_ID_NULL) {
595                 /* check level */
596                 if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
597                     level_id > TXGBE_TM_NODE_TYPE_PORT) {
598                         error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
599                         error->message = "Wrong level";
600                         return -EINVAL;
601                 }
602
603                 /* obviously no more than one root */
604                 if (tm_conf->root) {
605                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
606                         error->message = "already have a root";
607                         return -EINVAL;
608                 }
609
610                 /* add the root node */
611                 tm_node = rte_zmalloc("txgbe_tm_node",
612                                       sizeof(struct txgbe_tm_node),
613                                       0);
614                 if (!tm_node)
615                         return -ENOMEM;
616                 tm_node->id = node_id;
617                 tm_node->priority = priority;
618                 tm_node->weight = weight;
619                 tm_node->reference_count = 0;
620                 tm_node->no = 0;
621                 tm_node->parent = NULL;
622                 tm_node->shaper_profile = shaper_profile;
623                 rte_memcpy(&tm_node->params, params,
624                                  sizeof(struct rte_tm_node_params));
625                 tm_conf->root = tm_node;
626
627                 /* increase the reference counter of the shaper profile */
628                 if (shaper_profile)
629                         shaper_profile->reference_count++;
630
631                 return 0;
632         }
633
634         /* TC or queue node */
635         /* check the parent node */
636         parent_node = txgbe_tm_node_search(dev, parent_node_id,
637                                            &parent_node_type);
638         if (!parent_node) {
639                 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
640                 error->message = "parent not exist";
641                 return -EINVAL;
642         }
643         if (parent_node_type != TXGBE_TM_NODE_TYPE_PORT &&
644             parent_node_type != TXGBE_TM_NODE_TYPE_TC) {
645                 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
646                 error->message = "parent is not port or TC";
647                 return -EINVAL;
648         }
649         /* check level */
650         if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
651             level_id != parent_node_type + 1) {
652                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
653                 error->message = "Wrong level";
654                 return -EINVAL;
655         }
656
657         /* check the node number */
658         if (parent_node_type == TXGBE_TM_NODE_TYPE_PORT) {
659                 /* check TC number */
660                 nb_tcs = txgbe_tc_nb_get(dev);
661                 if (tm_conf->nb_tc_node >= nb_tcs) {
662                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
663                         error->message = "too many TCs";
664                         return -EINVAL;
665                 }
666         } else {
667                 /* check queue number */
668                 if (tm_conf->nb_queue_node >= dev->data->nb_tx_queues) {
669                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
670                         error->message = "too many queues";
671                         return -EINVAL;
672                 }
673
674                 txgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb);
675                 if (parent_node->reference_count >= q_nb) {
676                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
677                         error->message = "too many queues than TC supported";
678                         return -EINVAL;
679                 }
680
681                 /**
682                  * check the node id.
683                  * For queue, the node id means queue id.
684                  */
685                 if (node_id >= dev->data->nb_tx_queues) {
686                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
687                         error->message = "too large queue id";
688                         return -EINVAL;
689                 }
690         }
691
692         /* add the TC or queue node */
693         tm_node = rte_zmalloc("txgbe_tm_node",
694                               sizeof(struct txgbe_tm_node),
695                               0);
696         if (!tm_node)
697                 return -ENOMEM;
698         tm_node->id = node_id;
699         tm_node->priority = priority;
700         tm_node->weight = weight;
701         tm_node->reference_count = 0;
702         tm_node->parent = parent_node;
703         tm_node->shaper_profile = shaper_profile;
704         rte_memcpy(&tm_node->params, params,
705                          sizeof(struct rte_tm_node_params));
706         if (parent_node_type == TXGBE_TM_NODE_TYPE_PORT) {
707                 tm_node->no = parent_node->reference_count;
708                 TAILQ_INSERT_TAIL(&tm_conf->tc_list,
709                                   tm_node, node);
710                 tm_conf->nb_tc_node++;
711         } else {
712                 tm_node->no = q_base + parent_node->reference_count;
713                 TAILQ_INSERT_TAIL(&tm_conf->queue_list,
714                                   tm_node, node);
715                 tm_conf->nb_queue_node++;
716         }
717         tm_node->parent->reference_count++;
718
719         /* increase the reference counter of the shaper profile */
720         if (shaper_profile)
721                 shaper_profile->reference_count++;
722
723         return 0;
724 }
725
/*
 * Delete a TM hierarchy node (port, TC or queue level).
 *
 * Only allowed before the hierarchy is committed, and only for nodes
 * with no remaining children.  Drops the node's reference on its shaper
 * profile (if any) and on its parent, then unlinks and frees it.
 *
 * Returns 0 on success, -EINVAL with @error filled in on failure.
 */
static int
txgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		  struct rte_tm_error *error)
{
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
	enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
	struct txgbe_tm_node *tm_node;

	if (!error)
		return -EINVAL;

	/* if already committed, the hierarchy can no longer be modified */
	if (tm_conf->committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	if (node_id == RTE_TM_NODE_ID_NULL) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "invalid node id";
		return -EINVAL;
	}

	/* check if the node id exists; also learns which level it is on */
	tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
	if (!tm_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "no such node";
		return -EINVAL;
	}

	/* the node should have no child */
	if (tm_node->reference_count) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message =
			"cannot delete a node which has children";
		return -EINVAL;
	}

	/* root node: not on any list, just release the shaper ref and free */
	if (node_type == TXGBE_TM_NODE_TYPE_PORT) {
		if (tm_node->shaper_profile)
			tm_node->shaper_profile->reference_count--;
		rte_free(tm_node);
		tm_conf->root = NULL;
		return 0;
	}

	/* TC or queue node: release refs, unlink from its level list, free */
	if (tm_node->shaper_profile)
		tm_node->shaper_profile->reference_count--;
	tm_node->parent->reference_count--;
	if (node_type == TXGBE_TM_NODE_TYPE_TC) {
		TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
		tm_conf->nb_tc_node--;
	} else {
		TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
		tm_conf->nb_queue_node--;
	}
	rte_free(tm_node);

	return 0;
}
790
791 static int
792 txgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
793                     int *is_leaf, struct rte_tm_error *error)
794 {
795         enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
796         struct txgbe_tm_node *tm_node;
797
798         if (!is_leaf || !error)
799                 return -EINVAL;
800
801         if (node_id == RTE_TM_NODE_ID_NULL) {
802                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
803                 error->message = "invalid node id";
804                 return -EINVAL;
805         }
806
807         /* check if the node id exists */
808         tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
809         if (!tm_node) {
810                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
811                 error->message = "no such node";
812                 return -EINVAL;
813         }
814
815         if (node_type == TXGBE_TM_NODE_TYPE_QUEUE)
816                 *is_leaf = true;
817         else
818                 *is_leaf = false;
819
820         return 0;
821 }
822
823 static int
824 txgbe_level_capabilities_get(struct rte_eth_dev *dev,
825                              uint32_t level_id,
826                              struct rte_tm_level_capabilities *cap,
827                              struct rte_tm_error *error)
828 {
829         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
830
831         if (!cap || !error)
832                 return -EINVAL;
833
834         if (level_id >= TXGBE_TM_NODE_TYPE_MAX) {
835                 error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
836                 error->message = "too deep level";
837                 return -EINVAL;
838         }
839
840         /* root node */
841         if (level_id == TXGBE_TM_NODE_TYPE_PORT) {
842                 cap->n_nodes_max = 1;
843                 cap->n_nodes_nonleaf_max = 1;
844                 cap->n_nodes_leaf_max = 0;
845         } else if (level_id == TXGBE_TM_NODE_TYPE_TC) {
846                 /* TC */
847                 cap->n_nodes_max = TXGBE_DCB_TC_MAX;
848                 cap->n_nodes_nonleaf_max = TXGBE_DCB_TC_MAX;
849                 cap->n_nodes_leaf_max = 0;
850         } else {
851                 /* queue */
852                 cap->n_nodes_max = hw->mac.max_tx_queues;
853                 cap->n_nodes_nonleaf_max = 0;
854                 cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
855         }
856
857         cap->non_leaf_nodes_identical = true;
858         cap->leaf_nodes_identical = true;
859
860         if (level_id != TXGBE_TM_NODE_TYPE_QUEUE) {
861                 cap->nonleaf.shaper_private_supported = true;
862                 cap->nonleaf.shaper_private_dual_rate_supported = false;
863                 cap->nonleaf.shaper_private_rate_min = 0;
864                 /* 10Gbps -> 1.25GBps */
865                 cap->nonleaf.shaper_private_rate_max = 1250000000ull;
866                 cap->nonleaf.shaper_shared_n_max = 0;
867                 if (level_id == TXGBE_TM_NODE_TYPE_PORT)
868                         cap->nonleaf.sched_n_children_max =
869                                 TXGBE_DCB_TC_MAX;
870                 else
871                         cap->nonleaf.sched_n_children_max =
872                                 hw->mac.max_tx_queues;
873                 cap->nonleaf.sched_sp_n_priorities_max = 1;
874                 cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
875                 cap->nonleaf.sched_wfq_n_groups_max = 0;
876                 cap->nonleaf.sched_wfq_weight_max = 1;
877                 cap->nonleaf.stats_mask = 0;
878
879                 return 0;
880         }
881
882         /* queue node */
883         cap->leaf.shaper_private_supported = true;
884         cap->leaf.shaper_private_dual_rate_supported = false;
885         cap->leaf.shaper_private_rate_min = 0;
886         /* 10Gbps -> 1.25GBps */
887         cap->leaf.shaper_private_rate_max = 1250000000ull;
888         cap->leaf.shaper_shared_n_max = 0;
889         cap->leaf.cman_head_drop_supported = false;
890         cap->leaf.cman_wred_context_private_supported = true;
891         cap->leaf.cman_wred_context_shared_n_max = 0;
892         cap->leaf.stats_mask = 0;
893
894         return 0;
895 }
896
897 static int
898 txgbe_node_capabilities_get(struct rte_eth_dev *dev,
899                             uint32_t node_id,
900                             struct rte_tm_node_capabilities *cap,
901                             struct rte_tm_error *error)
902 {
903         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
904         enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
905         struct txgbe_tm_node *tm_node;
906
907         if (!cap || !error)
908                 return -EINVAL;
909
910         if (node_id == RTE_TM_NODE_ID_NULL) {
911                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
912                 error->message = "invalid node id";
913                 return -EINVAL;
914         }
915
916         /* check if the node id exists */
917         tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
918         if (!tm_node) {
919                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
920                 error->message = "no such node";
921                 return -EINVAL;
922         }
923
924         cap->shaper_private_supported = true;
925         cap->shaper_private_dual_rate_supported = false;
926         cap->shaper_private_rate_min = 0;
927         /* 10Gbps -> 1.25GBps */
928         cap->shaper_private_rate_max = 1250000000ull;
929         cap->shaper_shared_n_max = 0;
930
931         if (node_type == TXGBE_TM_NODE_TYPE_QUEUE) {
932                 cap->leaf.cman_head_drop_supported = false;
933                 cap->leaf.cman_wred_context_private_supported = true;
934                 cap->leaf.cman_wred_context_shared_n_max = 0;
935         } else {
936                 if (node_type == TXGBE_TM_NODE_TYPE_PORT)
937                         cap->nonleaf.sched_n_children_max =
938                                 TXGBE_DCB_TC_MAX;
939                 else
940                         cap->nonleaf.sched_n_children_max =
941                                 hw->mac.max_tx_queues;
942                 cap->nonleaf.sched_sp_n_priorities_max = 1;
943                 cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
944                 cap->nonleaf.sched_wfq_n_groups_max = 0;
945                 cap->nonleaf.sched_wfq_weight_max = 1;
946         }
947
948         cap->stats_mask = 0;
949
950         return 0;
951 }
952