net/hns3: fix typos on comments
[dpdk.git] / drivers / net / txgbe / txgbe_tm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020
3  */
4
5 #include <rte_malloc.h>
6
7 #include "txgbe_ethdev.h"
8
/* Forward declarations for the rte_tm_ops callbacks implemented below. */
static int txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
				     struct rte_tm_capabilities *cap,
				     struct rte_tm_error *error);
static int txgbe_shaper_profile_add(struct rte_eth_dev *dev,
				    uint32_t shaper_profile_id,
				    struct rte_tm_shaper_params *profile,
				    struct rte_tm_error *error);
static int txgbe_shaper_profile_del(struct rte_eth_dev *dev,
				    uint32_t shaper_profile_id,
				    struct rte_tm_error *error);
static int txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
			  uint32_t parent_node_id, uint32_t priority,
			  uint32_t weight, uint32_t level_id,
			  struct rte_tm_node_params *params,
			  struct rte_tm_error *error);
static int txgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
			     struct rte_tm_error *error);
static int txgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
			       int *is_leaf, struct rte_tm_error *error);
static int txgbe_level_capabilities_get(struct rte_eth_dev *dev,
					uint32_t level_id,
					struct rte_tm_level_capabilities *cap,
					struct rte_tm_error *error);
static int txgbe_node_capabilities_get(struct rte_eth_dev *dev,
				       uint32_t node_id,
				       struct rte_tm_node_capabilities *cap,
				       struct rte_tm_error *error);
static int txgbe_hierarchy_commit(struct rte_eth_dev *dev,
				  int clear_on_fail,
				  struct rte_tm_error *error);
39
40 const struct rte_tm_ops txgbe_tm_ops = {
41         .capabilities_get = txgbe_tm_capabilities_get,
42         .shaper_profile_add = txgbe_shaper_profile_add,
43         .shaper_profile_delete = txgbe_shaper_profile_del,
44         .node_add = txgbe_node_add,
45         .node_delete = txgbe_node_delete,
46         .node_type_get = txgbe_node_type_get,
47         .level_capabilities_get = txgbe_level_capabilities_get,
48         .node_capabilities_get = txgbe_node_capabilities_get,
49         .hierarchy_commit = txgbe_hierarchy_commit,
50 };
51
52 int
53 txgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
54                  void *arg)
55 {
56         if (!arg)
57                 return -EINVAL;
58
59         *(const void **)arg = &txgbe_tm_ops;
60
61         return 0;
62 }
63
64 void
65 txgbe_tm_conf_init(struct rte_eth_dev *dev)
66 {
67         struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
68
69         /* initialize shaper profile list */
70         TAILQ_INIT(&tm_conf->shaper_profile_list);
71
72         /* initialize node configuration */
73         tm_conf->root = NULL;
74         TAILQ_INIT(&tm_conf->queue_list);
75         TAILQ_INIT(&tm_conf->tc_list);
76         tm_conf->nb_tc_node = 0;
77         tm_conf->nb_queue_node = 0;
78         tm_conf->committed = false;
79 }
80
81 void
82 txgbe_tm_conf_uninit(struct rte_eth_dev *dev)
83 {
84         struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
85         struct txgbe_tm_shaper_profile *shaper_profile;
86         struct txgbe_tm_node *tm_node;
87
88         /* clear node configuration */
89         while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) {
90                 TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
91                 rte_free(tm_node);
92         }
93         tm_conf->nb_queue_node = 0;
94         while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) {
95                 TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
96                 rte_free(tm_node);
97         }
98         tm_conf->nb_tc_node = 0;
99         if (tm_conf->root) {
100                 rte_free(tm_conf->root);
101                 tm_conf->root = NULL;
102         }
103
104         /* Remove all shaper profiles */
105         while ((shaper_profile =
106                TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
107                 TAILQ_REMOVE(&tm_conf->shaper_profile_list,
108                              shaper_profile, node);
109                 rte_free(shaper_profile);
110         }
111 }
112
113 static inline uint8_t
114 txgbe_tc_nb_get(struct rte_eth_dev *dev)
115 {
116         struct rte_eth_conf *eth_conf;
117         uint8_t nb_tcs = 0;
118
119         eth_conf = &dev->data->dev_conf;
120         if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
121                 nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
122         } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
123                 if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
124                     ETH_32_POOLS)
125                         nb_tcs = ETH_4_TCS;
126                 else
127                         nb_tcs = ETH_8_TCS;
128         } else {
129                 nb_tcs = 1;
130         }
131
132         return nb_tcs;
133 }
134
135 static int
136 txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
137                           struct rte_tm_capabilities *cap,
138                           struct rte_tm_error *error)
139 {
140         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
141         uint8_t tc_nb = txgbe_tc_nb_get(dev);
142
143         if (!cap || !error)
144                 return -EINVAL;
145
146         if (tc_nb > hw->mac.max_tx_queues)
147                 return -EINVAL;
148
149         error->type = RTE_TM_ERROR_TYPE_NONE;
150
151         /* set all the parameters to 0 first. */
152         memset(cap, 0, sizeof(struct rte_tm_capabilities));
153
154         /**
155          * here is the max capability not the current configuration.
156          */
157         /* port + TCs + queues */
158         cap->n_nodes_max = 1 + TXGBE_DCB_TC_MAX +
159                            hw->mac.max_tx_queues;
160         cap->n_levels_max = 3;
161         cap->non_leaf_nodes_identical = 1;
162         cap->leaf_nodes_identical = 1;
163         cap->shaper_n_max = cap->n_nodes_max;
164         cap->shaper_private_n_max = cap->n_nodes_max;
165         cap->shaper_private_dual_rate_n_max = 0;
166         cap->shaper_private_rate_min = 0;
167         /* 10Gbps -> 1.25GBps */
168         cap->shaper_private_rate_max = 1250000000ull;
169         cap->shaper_shared_n_max = 0;
170         cap->shaper_shared_n_nodes_per_shaper_max = 0;
171         cap->shaper_shared_n_shapers_per_node_max = 0;
172         cap->shaper_shared_dual_rate_n_max = 0;
173         cap->shaper_shared_rate_min = 0;
174         cap->shaper_shared_rate_max = 0;
175         cap->sched_n_children_max = hw->mac.max_tx_queues;
176         /**
177          * HW supports SP. But no plan to support it now.
178          * So, all the nodes should have the same priority.
179          */
180         cap->sched_sp_n_priorities_max = 1;
181         cap->sched_wfq_n_children_per_group_max = 0;
182         cap->sched_wfq_n_groups_max = 0;
183         /**
184          * SW only supports fair round robin now.
185          * So, all the nodes should have the same weight.
186          */
187         cap->sched_wfq_weight_max = 1;
188         cap->cman_head_drop_supported = 0;
189         cap->dynamic_update_mask = 0;
190         cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
191         cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
192         cap->cman_wred_context_n_max = 0;
193         cap->cman_wred_context_private_n_max = 0;
194         cap->cman_wred_context_shared_n_max = 0;
195         cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
196         cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
197         cap->stats_mask = 0;
198
199         return 0;
200 }
201
202 static inline struct txgbe_tm_shaper_profile *
203 txgbe_shaper_profile_search(struct rte_eth_dev *dev,
204                             uint32_t shaper_profile_id)
205 {
206         struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
207         struct txgbe_shaper_profile_list *shaper_profile_list =
208                 &tm_conf->shaper_profile_list;
209         struct txgbe_tm_shaper_profile *shaper_profile;
210
211         TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
212                 if (shaper_profile_id == shaper_profile->shaper_profile_id)
213                         return shaper_profile;
214         }
215
216         return NULL;
217 }
218
219 static int
220 txgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
221                                  struct rte_tm_error *error)
222 {
223         /* min rate not supported */
224         if (profile->committed.rate) {
225                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
226                 error->message = "committed rate not supported";
227                 return -EINVAL;
228         }
229         /* min bucket size not supported */
230         if (profile->committed.size) {
231                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
232                 error->message = "committed bucket size not supported";
233                 return -EINVAL;
234         }
235         /* max bucket size not supported */
236         if (profile->peak.size) {
237                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
238                 error->message = "peak bucket size not supported";
239                 return -EINVAL;
240         }
241         /* length adjustment not supported */
242         if (profile->pkt_length_adjust) {
243                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
244                 error->message = "packet length adjustment not supported";
245                 return -EINVAL;
246         }
247
248         return 0;
249 }
250
251 static int
252 txgbe_shaper_profile_add(struct rte_eth_dev *dev,
253                          uint32_t shaper_profile_id,
254                          struct rte_tm_shaper_params *profile,
255                          struct rte_tm_error *error)
256 {
257         struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
258         struct txgbe_tm_shaper_profile *shaper_profile;
259         int ret;
260
261         if (!profile || !error)
262                 return -EINVAL;
263
264         ret = txgbe_shaper_profile_param_check(profile, error);
265         if (ret)
266                 return ret;
267
268         shaper_profile = txgbe_shaper_profile_search(dev, shaper_profile_id);
269
270         if (shaper_profile) {
271                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
272                 error->message = "profile ID exist";
273                 return -EINVAL;
274         }
275
276         shaper_profile = rte_zmalloc("txgbe_tm_shaper_profile",
277                                      sizeof(struct txgbe_tm_shaper_profile),
278                                      0);
279         if (!shaper_profile)
280                 return -ENOMEM;
281         shaper_profile->shaper_profile_id = shaper_profile_id;
282         rte_memcpy(&shaper_profile->profile, profile,
283                          sizeof(struct rte_tm_shaper_params));
284         TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
285                           shaper_profile, node);
286
287         return 0;
288 }
289
290 static int
291 txgbe_shaper_profile_del(struct rte_eth_dev *dev,
292                          uint32_t shaper_profile_id,
293                          struct rte_tm_error *error)
294 {
295         struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
296         struct txgbe_tm_shaper_profile *shaper_profile;
297
298         if (!error)
299                 return -EINVAL;
300
301         shaper_profile = txgbe_shaper_profile_search(dev, shaper_profile_id);
302
303         if (!shaper_profile) {
304                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
305                 error->message = "profile ID not exist";
306                 return -EINVAL;
307         }
308
309         /* don't delete a profile if it's used by one or several nodes */
310         if (shaper_profile->reference_count) {
311                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
312                 error->message = "profile in use";
313                 return -EINVAL;
314         }
315
316         TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node);
317         rte_free(shaper_profile);
318
319         return 0;
320 }
321
322 static inline struct txgbe_tm_node *
323 txgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
324                      enum txgbe_tm_node_type *node_type)
325 {
326         struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
327         struct txgbe_tm_node *tm_node;
328
329         if (tm_conf->root && tm_conf->root->id == node_id) {
330                 *node_type = TXGBE_TM_NODE_TYPE_PORT;
331                 return tm_conf->root;
332         }
333
334         TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
335                 if (tm_node->id == node_id) {
336                         *node_type = TXGBE_TM_NODE_TYPE_TC;
337                         return tm_node;
338                 }
339         }
340
341         TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
342                 if (tm_node->id == node_id) {
343                         *node_type = TXGBE_TM_NODE_TYPE_QUEUE;
344                         return tm_node;
345                 }
346         }
347
348         return NULL;
349 }
350
351 static void
352 txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
353                         uint16_t *base, uint16_t *nb)
354 {
355         uint8_t nb_tcs = txgbe_tc_nb_get(dev);
356         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
357         uint16_t vf_num = pci_dev->max_vfs;
358
359         *base = 0;
360         *nb = 0;
361
362         /* VT on */
363         if (vf_num) {
364                 /* no DCB */
365                 if (nb_tcs == 1) {
366                         if (vf_num >= ETH_32_POOLS) {
367                                 *nb = 2;
368                                 *base = vf_num * 2;
369                         } else if (vf_num >= ETH_16_POOLS) {
370                                 *nb = 4;
371                                 *base = vf_num * 4;
372                         } else {
373                                 *nb = 8;
374                                 *base = vf_num * 8;
375                         }
376                 } else {
377                         /* DCB */
378                         *nb = 1;
379                         *base = vf_num * nb_tcs + tc_node_no;
380                 }
381         } else {
382                 /* VT off */
383                 if (nb_tcs == ETH_8_TCS) {
384                         switch (tc_node_no) {
385                         case 0:
386                                 *base = 0;
387                                 *nb = 32;
388                                 break;
389                         case 1:
390                                 *base = 32;
391                                 *nb = 32;
392                                 break;
393                         case 2:
394                                 *base = 64;
395                                 *nb = 16;
396                                 break;
397                         case 3:
398                                 *base = 80;
399                                 *nb = 16;
400                                 break;
401                         case 4:
402                                 *base = 96;
403                                 *nb = 8;
404                                 break;
405                         case 5:
406                                 *base = 104;
407                                 *nb = 8;
408                                 break;
409                         case 6:
410                                 *base = 112;
411                                 *nb = 8;
412                                 break;
413                         case 7:
414                                 *base = 120;
415                                 *nb = 8;
416                                 break;
417                         default:
418                                 return;
419                         }
420                 } else {
421                         switch (tc_node_no) {
422                         /**
423                          * If no VF and no DCB, only 64 queues can be used.
424                          * This case also be covered by this "case 0".
425                          */
426                         case 0:
427                                 *base = 0;
428                                 *nb = 64;
429                                 break;
430                         case 1:
431                                 *base = 64;
432                                 *nb = 32;
433                                 break;
434                         case 2:
435                                 *base = 96;
436                                 *nb = 16;
437                                 break;
438                         case 3:
439                                 *base = 112;
440                                 *nb = 16;
441                                 break;
442                         default:
443                                 return;
444                         }
445                 }
446         }
447 }
448
449 static int
450 txgbe_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
451                        uint32_t priority, uint32_t weight,
452                        struct rte_tm_node_params *params,
453                        struct rte_tm_error *error)
454 {
455         if (node_id == RTE_TM_NODE_ID_NULL) {
456                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
457                 error->message = "invalid node id";
458                 return -EINVAL;
459         }
460
461         if (priority) {
462                 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
463                 error->message = "priority should be 0";
464                 return -EINVAL;
465         }
466
467         if (weight != 1) {
468                 error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
469                 error->message = "weight must be 1";
470                 return -EINVAL;
471         }
472
473         /* not support shared shaper */
474         if (params->shared_shaper_id) {
475                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
476                 error->message = "shared shaper not supported";
477                 return -EINVAL;
478         }
479         if (params->n_shared_shapers) {
480                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
481                 error->message = "shared shaper not supported";
482                 return -EINVAL;
483         }
484
485         /* for non-leaf node */
486         if (node_id >= dev->data->nb_tx_queues) {
487                 /* check the unsupported parameters */
488                 if (params->nonleaf.wfq_weight_mode) {
489                         error->type =
490                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
491                         error->message = "WFQ not supported";
492                         return -EINVAL;
493                 }
494                 if (params->nonleaf.n_sp_priorities != 1) {
495                         error->type =
496                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
497                         error->message = "SP priority not supported";
498                         return -EINVAL;
499                 } else if (params->nonleaf.wfq_weight_mode &&
500                            !(*params->nonleaf.wfq_weight_mode)) {
501                         error->type =
502                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
503                         error->message = "WFP should be byte mode";
504                         return -EINVAL;
505                 }
506
507                 return 0;
508         }
509
510         /* for leaf node */
511         /* check the unsupported parameters */
512         if (params->leaf.cman) {
513                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
514                 error->message = "Congestion management not supported";
515                 return -EINVAL;
516         }
517         if (params->leaf.wred.wred_profile_id !=
518             RTE_TM_WRED_PROFILE_ID_NONE) {
519                 error->type =
520                         RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
521                 error->message = "WRED not supported";
522                 return -EINVAL;
523         }
524         if (params->leaf.wred.shared_wred_context_id) {
525                 error->type =
526                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
527                 error->message = "WRED not supported";
528                 return -EINVAL;
529         }
530         if (params->leaf.wred.n_shared_wred_contexts) {
531                 error->type =
532                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
533                 error->message = "WRED not supported";
534                 return -EINVAL;
535         }
536
537         return 0;
538 }
539
540 /**
541  * Now the TC and queue configuration is controlled by DCB.
542  * We need check if the node configuration follows the DCB configuration.
543  * In the future, we may use TM to cover DCB.
544  */
545 static int
546 txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
547                uint32_t parent_node_id, uint32_t priority,
548                uint32_t weight, uint32_t level_id,
549                struct rte_tm_node_params *params,
550                struct rte_tm_error *error)
551 {
552         struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
553         enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
554         enum txgbe_tm_node_type parent_node_type = TXGBE_TM_NODE_TYPE_MAX;
555         struct txgbe_tm_shaper_profile *shaper_profile = NULL;
556         struct txgbe_tm_node *tm_node;
557         struct txgbe_tm_node *parent_node;
558         uint8_t nb_tcs;
559         uint16_t q_base = 0;
560         uint16_t q_nb = 0;
561         int ret;
562
563         if (!params || !error)
564                 return -EINVAL;
565
566         /* if already committed */
567         if (tm_conf->committed) {
568                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
569                 error->message = "already committed";
570                 return -EINVAL;
571         }
572
573         ret = txgbe_node_param_check(dev, node_id, priority, weight,
574                                      params, error);
575         if (ret)
576                 return ret;
577
578         /* check if the node ID is already used */
579         if (txgbe_tm_node_search(dev, node_id, &node_type)) {
580                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
581                 error->message = "node id already used";
582                 return -EINVAL;
583         }
584
585         /* check the shaper profile id */
586         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
587                 shaper_profile = txgbe_shaper_profile_search(dev,
588                                         params->shaper_profile_id);
589                 if (!shaper_profile) {
590                         error->type =
591                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
592                         error->message = "shaper profile not exist";
593                         return -EINVAL;
594                 }
595         }
596
597         /* root node if not have a parent */
598         if (parent_node_id == RTE_TM_NODE_ID_NULL) {
599                 /* check level */
600                 if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
601                     level_id > TXGBE_TM_NODE_TYPE_PORT) {
602                         error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
603                         error->message = "Wrong level";
604                         return -EINVAL;
605                 }
606
607                 /* obviously no more than one root */
608                 if (tm_conf->root) {
609                         error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
610                         error->message = "already have a root";
611                         return -EINVAL;
612                 }
613
614                 /* add the root node */
615                 tm_node = rte_zmalloc("txgbe_tm_node",
616                                       sizeof(struct txgbe_tm_node),
617                                       0);
618                 if (!tm_node)
619                         return -ENOMEM;
620                 tm_node->id = node_id;
621                 tm_node->priority = priority;
622                 tm_node->weight = weight;
623                 tm_node->reference_count = 0;
624                 tm_node->no = 0;
625                 tm_node->parent = NULL;
626                 tm_node->shaper_profile = shaper_profile;
627                 rte_memcpy(&tm_node->params, params,
628                                  sizeof(struct rte_tm_node_params));
629                 tm_conf->root = tm_node;
630
631                 /* increase the reference counter of the shaper profile */
632                 if (shaper_profile)
633                         shaper_profile->reference_count++;
634
635                 return 0;
636         }
637
638         /* TC or queue node */
639         /* check the parent node */
640         parent_node = txgbe_tm_node_search(dev, parent_node_id,
641                                            &parent_node_type);
642         if (!parent_node) {
643                 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
644                 error->message = "parent not exist";
645                 return -EINVAL;
646         }
647         if (parent_node_type != TXGBE_TM_NODE_TYPE_PORT &&
648             parent_node_type != TXGBE_TM_NODE_TYPE_TC) {
649                 error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
650                 error->message = "parent is not port or TC";
651                 return -EINVAL;
652         }
653         /* check level */
654         if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
655             level_id != parent_node_type + 1) {
656                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
657                 error->message = "Wrong level";
658                 return -EINVAL;
659         }
660
661         /* check the node number */
662         if (parent_node_type == TXGBE_TM_NODE_TYPE_PORT) {
663                 /* check TC number */
664                 nb_tcs = txgbe_tc_nb_get(dev);
665                 if (tm_conf->nb_tc_node >= nb_tcs) {
666                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
667                         error->message = "too many TCs";
668                         return -EINVAL;
669                 }
670         } else {
671                 /* check queue number */
672                 if (tm_conf->nb_queue_node >= dev->data->nb_tx_queues) {
673                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
674                         error->message = "too many queues";
675                         return -EINVAL;
676                 }
677
678                 txgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb);
679                 if (parent_node->reference_count >= q_nb) {
680                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
681                         error->message = "too many queues than TC supported";
682                         return -EINVAL;
683                 }
684
685                 /**
686                  * check the node id.
687                  * For queue, the node id means queue id.
688                  */
689                 if (node_id >= dev->data->nb_tx_queues) {
690                         error->type = RTE_TM_ERROR_TYPE_NODE_ID;
691                         error->message = "too large queue id";
692                         return -EINVAL;
693                 }
694         }
695
696         /* add the TC or queue node */
697         tm_node = rte_zmalloc("txgbe_tm_node",
698                               sizeof(struct txgbe_tm_node),
699                               0);
700         if (!tm_node)
701                 return -ENOMEM;
702         tm_node->id = node_id;
703         tm_node->priority = priority;
704         tm_node->weight = weight;
705         tm_node->reference_count = 0;
706         tm_node->parent = parent_node;
707         tm_node->shaper_profile = shaper_profile;
708         rte_memcpy(&tm_node->params, params,
709                          sizeof(struct rte_tm_node_params));
710         if (parent_node_type == TXGBE_TM_NODE_TYPE_PORT) {
711                 tm_node->no = parent_node->reference_count;
712                 TAILQ_INSERT_TAIL(&tm_conf->tc_list,
713                                   tm_node, node);
714                 tm_conf->nb_tc_node++;
715         } else {
716                 tm_node->no = q_base + parent_node->reference_count;
717                 TAILQ_INSERT_TAIL(&tm_conf->queue_list,
718                                   tm_node, node);
719                 tm_conf->nb_queue_node++;
720         }
721         tm_node->parent->reference_count++;
722
723         /* increase the reference counter of the shaper profile */
724         if (shaper_profile)
725                 shaper_profile->reference_count++;
726
727         return 0;
728 }
729
730 static int
731 txgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
732                   struct rte_tm_error *error)
733 {
734         struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
735         enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
736         struct txgbe_tm_node *tm_node;
737
738         if (!error)
739                 return -EINVAL;
740
741         /* if already committed */
742         if (tm_conf->committed) {
743                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
744                 error->message = "already committed";
745                 return -EINVAL;
746         }
747
748         if (node_id == RTE_TM_NODE_ID_NULL) {
749                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
750                 error->message = "invalid node id";
751                 return -EINVAL;
752         }
753
754         /* check the if the node id exists */
755         tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
756         if (!tm_node) {
757                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
758                 error->message = "no such node";
759                 return -EINVAL;
760         }
761
762         /* the node should have no child */
763         if (tm_node->reference_count) {
764                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
765                 error->message =
766                         "cannot delete a node which has children";
767                 return -EINVAL;
768         }
769
770         /* root node */
771         if (node_type == TXGBE_TM_NODE_TYPE_PORT) {
772                 if (tm_node->shaper_profile)
773                         tm_node->shaper_profile->reference_count--;
774                 rte_free(tm_node);
775                 tm_conf->root = NULL;
776                 return 0;
777         }
778
779         /* TC or queue node */
780         if (tm_node->shaper_profile)
781                 tm_node->shaper_profile->reference_count--;
782         tm_node->parent->reference_count--;
783         if (node_type == TXGBE_TM_NODE_TYPE_TC) {
784                 TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
785                 tm_conf->nb_tc_node--;
786         } else {
787                 TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
788                 tm_conf->nb_queue_node--;
789         }
790         rte_free(tm_node);
791
792         return 0;
793 }
794
795 static int
796 txgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
797                     int *is_leaf, struct rte_tm_error *error)
798 {
799         enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
800         struct txgbe_tm_node *tm_node;
801
802         if (!is_leaf || !error)
803                 return -EINVAL;
804
805         if (node_id == RTE_TM_NODE_ID_NULL) {
806                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
807                 error->message = "invalid node id";
808                 return -EINVAL;
809         }
810
811         /* check if the node id exists */
812         tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
813         if (!tm_node) {
814                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
815                 error->message = "no such node";
816                 return -EINVAL;
817         }
818
819         if (node_type == TXGBE_TM_NODE_TYPE_QUEUE)
820                 *is_leaf = true;
821         else
822                 *is_leaf = false;
823
824         return 0;
825 }
826
827 static int
828 txgbe_level_capabilities_get(struct rte_eth_dev *dev,
829                              uint32_t level_id,
830                              struct rte_tm_level_capabilities *cap,
831                              struct rte_tm_error *error)
832 {
833         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
834
835         if (!cap || !error)
836                 return -EINVAL;
837
838         if (level_id >= TXGBE_TM_NODE_TYPE_MAX) {
839                 error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
840                 error->message = "too deep level";
841                 return -EINVAL;
842         }
843
844         /* root node */
845         if (level_id == TXGBE_TM_NODE_TYPE_PORT) {
846                 cap->n_nodes_max = 1;
847                 cap->n_nodes_nonleaf_max = 1;
848                 cap->n_nodes_leaf_max = 0;
849         } else if (level_id == TXGBE_TM_NODE_TYPE_TC) {
850                 /* TC */
851                 cap->n_nodes_max = TXGBE_DCB_TC_MAX;
852                 cap->n_nodes_nonleaf_max = TXGBE_DCB_TC_MAX;
853                 cap->n_nodes_leaf_max = 0;
854         } else {
855                 /* queue */
856                 cap->n_nodes_max = hw->mac.max_tx_queues;
857                 cap->n_nodes_nonleaf_max = 0;
858                 cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
859         }
860
861         cap->non_leaf_nodes_identical = true;
862         cap->leaf_nodes_identical = true;
863
864         if (level_id != TXGBE_TM_NODE_TYPE_QUEUE) {
865                 cap->nonleaf.shaper_private_supported = true;
866                 cap->nonleaf.shaper_private_dual_rate_supported = false;
867                 cap->nonleaf.shaper_private_rate_min = 0;
868                 /* 10Gbps -> 1.25GBps */
869                 cap->nonleaf.shaper_private_rate_max = 1250000000ull;
870                 cap->nonleaf.shaper_shared_n_max = 0;
871                 if (level_id == TXGBE_TM_NODE_TYPE_PORT)
872                         cap->nonleaf.sched_n_children_max =
873                                 TXGBE_DCB_TC_MAX;
874                 else
875                         cap->nonleaf.sched_n_children_max =
876                                 hw->mac.max_tx_queues;
877                 cap->nonleaf.sched_sp_n_priorities_max = 1;
878                 cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
879                 cap->nonleaf.sched_wfq_n_groups_max = 0;
880                 cap->nonleaf.sched_wfq_weight_max = 1;
881                 cap->nonleaf.stats_mask = 0;
882
883                 return 0;
884         }
885
886         /* queue node */
887         cap->leaf.shaper_private_supported = true;
888         cap->leaf.shaper_private_dual_rate_supported = false;
889         cap->leaf.shaper_private_rate_min = 0;
890         /* 10Gbps -> 1.25GBps */
891         cap->leaf.shaper_private_rate_max = 1250000000ull;
892         cap->leaf.shaper_shared_n_max = 0;
893         cap->leaf.cman_head_drop_supported = false;
894         cap->leaf.cman_wred_context_private_supported = true;
895         cap->leaf.cman_wred_context_shared_n_max = 0;
896         cap->leaf.stats_mask = 0;
897
898         return 0;
899 }
900
901 static int
902 txgbe_node_capabilities_get(struct rte_eth_dev *dev,
903                             uint32_t node_id,
904                             struct rte_tm_node_capabilities *cap,
905                             struct rte_tm_error *error)
906 {
907         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
908         enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
909         struct txgbe_tm_node *tm_node;
910
911         if (!cap || !error)
912                 return -EINVAL;
913
914         if (node_id == RTE_TM_NODE_ID_NULL) {
915                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
916                 error->message = "invalid node id";
917                 return -EINVAL;
918         }
919
920         /* check if the node id exists */
921         tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
922         if (!tm_node) {
923                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
924                 error->message = "no such node";
925                 return -EINVAL;
926         }
927
928         cap->shaper_private_supported = true;
929         cap->shaper_private_dual_rate_supported = false;
930         cap->shaper_private_rate_min = 0;
931         /* 10Gbps -> 1.25GBps */
932         cap->shaper_private_rate_max = 1250000000ull;
933         cap->shaper_shared_n_max = 0;
934
935         if (node_type == TXGBE_TM_NODE_TYPE_QUEUE) {
936                 cap->leaf.cman_head_drop_supported = false;
937                 cap->leaf.cman_wred_context_private_supported = true;
938                 cap->leaf.cman_wred_context_shared_n_max = 0;
939         } else {
940                 if (node_type == TXGBE_TM_NODE_TYPE_PORT)
941                         cap->nonleaf.sched_n_children_max =
942                                 TXGBE_DCB_TC_MAX;
943                 else
944                         cap->nonleaf.sched_n_children_max =
945                                 hw->mac.max_tx_queues;
946                 cap->nonleaf.sched_sp_n_priorities_max = 1;
947                 cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
948                 cap->nonleaf.sched_wfq_n_groups_max = 0;
949                 cap->nonleaf.sched_wfq_weight_max = 1;
950         }
951
952         cap->stats_mask = 0;
953
954         return 0;
955 }
956
957 static int
958 txgbe_hierarchy_commit(struct rte_eth_dev *dev,
959                        int clear_on_fail,
960                        struct rte_tm_error *error)
961 {
962         struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
963         struct txgbe_tm_node *tm_node;
964         uint64_t bw;
965         int ret;
966
967         if (!error)
968                 return -EINVAL;
969
970         /* check the setting */
971         if (!tm_conf->root)
972                 goto done;
973
974         /* not support port max bandwidth yet */
975         if (tm_conf->root->shaper_profile &&
976             tm_conf->root->shaper_profile->profile.peak.rate) {
977                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
978                 error->message = "no port max bandwidth";
979                 goto fail_clear;
980         }
981
982         /* HW not support TC max bandwidth */
983         TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
984                 if (tm_node->shaper_profile &&
985                     tm_node->shaper_profile->profile.peak.rate) {
986                         error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
987                         error->message = "no TC max bandwidth";
988                         goto fail_clear;
989                 }
990         }
991
992         /* queue max bandwidth */
993         TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
994                 if (tm_node->shaper_profile)
995                         bw = tm_node->shaper_profile->profile.peak.rate;
996                 else
997                         bw = 0;
998                 if (bw) {
999                         /* interpret Bps to Mbps */
1000                         bw = bw * 8 / 1000 / 1000;
1001                         ret = txgbe_set_queue_rate_limit(dev, tm_node->no, bw);
1002                         if (ret) {
1003                                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
1004                                 error->message =
1005                                         "failed to set queue max bandwidth";
1006                                 goto fail_clear;
1007                         }
1008                 }
1009         }
1010
1011 done:
1012         tm_conf->committed = true;
1013         return 0;
1014
1015 fail_clear:
1016         /* clear all the traffic manager configuration */
1017         if (clear_on_fail) {
1018                 txgbe_tm_conf_uninit(dev);
1019                 txgbe_tm_conf_init(dev);
1020         }
1021         return -EINVAL;
1022 }