net/txgbe: add TM capabilities get operation
drivers/net/txgbe/txgbe_tm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

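/*
 * Traffic manager (rte_tm) support for txgbe: this file wires up the
 * generic TM ops, implements the capability query operations, and
 * handles init/uninit of the driver's software TM configuration state.
 */
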
#include <rte_malloc.h>

#include "txgbe_ethdev.h"

static int txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
                                     struct rte_tm_capabilities *cap,
                                     struct rte_tm_error *error);
static int txgbe_level_capabilities_get(struct rte_eth_dev *dev,
                                        uint32_t level_id,
                                        struct rte_tm_level_capabilities *cap,
                                        struct rte_tm_error *error);
static int txgbe_node_capabilities_get(struct rte_eth_dev *dev,
                                       uint32_t node_id,
                                       struct rte_tm_node_capabilities *cap,
                                       struct rte_tm_error *error);

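/*
 * TM ops exposed to the generic rte_tm layer. Only the capability
 * query operations are wired up so far; node, shaper and hierarchy
 * commit ops can be added to this table as they are implemented.
 */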
const struct rte_tm_ops txgbe_tm_ops = {
        .capabilities_get = txgbe_tm_capabilities_get,
        .level_capabilities_get = txgbe_level_capabilities_get,
        .node_capabilities_get = txgbe_node_capabilities_get,
};

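/*
 * Hand the TM ops table back to the ethdev layer. rte_tm API calls
 * (rte_tm_capabilities_get() and friends) reach this driver roughly as
 * in the sketch below (generic-layer pseudo-code, not code in this file):
 *
 *      const struct rte_tm_ops *ops;
 *      dev->dev_ops->tm_ops_get(dev, &ops);
 *      ops->capabilities_get(dev, cap, error);
 */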
int
txgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
                 void *arg)
{
        if (!arg)
                return -EINVAL;

        *(const void **)arg = &txgbe_tm_ops;

        return 0;
}

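/*
 * Reset the software TM configuration to its empty state: no shaper
 * profiles, no nodes, nothing committed to hardware. Presumably run
 * once at device init time, before any rte_tm calls are serviced.
 */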
void
txgbe_tm_conf_init(struct rte_eth_dev *dev)
{
        struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);

        /* initialize shaper profile list */
        TAILQ_INIT(&tm_conf->shaper_profile_list);

        /* initialize node configuration */
        tm_conf->root = NULL;
        TAILQ_INIT(&tm_conf->queue_list);
        TAILQ_INIT(&tm_conf->tc_list);
        tm_conf->nb_tc_node = 0;
        tm_conf->nb_queue_node = 0;
        tm_conf->committed = false;
}

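/*
 * Free every queue node, TC node, the root node and all shaper
 * profiles, returning the software TM configuration to its empty
 * state. Counterpart of txgbe_tm_conf_init().
 */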
void
txgbe_tm_conf_uninit(struct rte_eth_dev *dev)
{
        struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
        struct txgbe_tm_shaper_profile *shaper_profile;
        struct txgbe_tm_node *tm_node;

        /* clear node configuration */
        while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) {
                TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
                rte_free(tm_node);
        }
        tm_conf->nb_queue_node = 0;
        while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) {
                TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
                rte_free(tm_node);
        }
        tm_conf->nb_tc_node = 0;
        if (tm_conf->root) {
                rte_free(tm_conf->root);
                tm_conf->root = NULL;
        }

        /* remove all shaper profiles */
        while ((shaper_profile =
               TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
                TAILQ_REMOVE(&tm_conf->shaper_profile_list,
                             shaper_profile, node);
                rte_free(shaper_profile);
        }
}

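/*
 * Derive the number of traffic classes from the configured Tx
 * multi-queue mode:
 *   - DCB:       taken from dcb_tx_conf.nb_tcs
 *   - VMDq+DCB:  4 TCs with 32 pools, otherwise 8 TCs
 *   - others:    a single TC
 */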
static inline uint8_t
txgbe_tc_nb_get(struct rte_eth_dev *dev)
{
        struct rte_eth_conf *eth_conf;
        uint8_t nb_tcs = 0;

        eth_conf = &dev->data->dev_conf;
        if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
        } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
                    ETH_32_POOLS)
                        nb_tcs = ETH_4_TCS;
                else
                        nb_tcs = ETH_8_TCS;
        } else {
                nb_tcs = 1;
        }

        return nb_tcs;
}

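/*
 * Port-level TM capabilities. The private shaper rate limit used
 * throughout this file is the 10 Gbps line rate expressed in bytes:
 * 10,000,000,000 bits/s / 8 bits per byte = 1,250,000,000 bytes/s.
 */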
static int
txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
                          struct rte_tm_capabilities *cap,
                          struct rte_tm_error *error)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        uint8_t tc_nb = txgbe_tc_nb_get(dev);

        if (!cap || !error)
                return -EINVAL;

        if (tc_nb > hw->mac.max_tx_queues)
                return -EINVAL;

        error->type = RTE_TM_ERROR_TYPE_NONE;

        /* set all the parameters to 0 first */
        memset(cap, 0, sizeof(struct rte_tm_capabilities));

        /**
         * Report the maximum capability here, not the current
         * configuration.
         */
        /* port + TCs + queues */
        cap->n_nodes_max = 1 + TXGBE_DCB_TC_MAX +
                           hw->mac.max_tx_queues;
        cap->n_levels_max = 3;
        cap->non_leaf_nodes_identical = 1;
        cap->leaf_nodes_identical = 1;
        cap->shaper_n_max = cap->n_nodes_max;
        cap->shaper_private_n_max = cap->n_nodes_max;
        cap->shaper_private_dual_rate_n_max = 0;
        cap->shaper_private_rate_min = 0;
        /* 10 Gbps -> 1.25 GBps */
        cap->shaper_private_rate_max = 1250000000ull;
        cap->shaper_shared_n_max = 0;
        cap->shaper_shared_n_nodes_per_shaper_max = 0;
        cap->shaper_shared_n_shapers_per_node_max = 0;
        cap->shaper_shared_dual_rate_n_max = 0;
        cap->shaper_shared_rate_min = 0;
        cap->shaper_shared_rate_max = 0;
        cap->sched_n_children_max = hw->mac.max_tx_queues;
        /**
         * HW supports SP (strict priority), but there is no plan to
         * support it for now, so all nodes must have the same priority.
         */
        cap->sched_sp_n_priorities_max = 1;
        cap->sched_wfq_n_children_per_group_max = 0;
        cap->sched_wfq_n_groups_max = 0;
        /**
         * SW only supports fair round robin for now, so all nodes must
         * have the same weight.
         */
        cap->sched_wfq_weight_max = 1;
        cap->cman_head_drop_supported = 0;
        cap->dynamic_update_mask = 0;
        cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
        cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
        cap->cman_wred_context_n_max = 0;
        cap->cman_wred_context_private_n_max = 0;
        cap->cman_wred_context_shared_n_max = 0;
        cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
        cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
        cap->stats_mask = 0;

        return 0;
}

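/*
 * Look a node id up in the software configuration and report which
 * level it belongs to. The root is checked first, then the TC list,
 * then the queue list; *node_type is only written on a match.
 */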
static inline struct txgbe_tm_node *
txgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
                     enum txgbe_tm_node_type *node_type)
{
        struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
        struct txgbe_tm_node *tm_node;

        if (tm_conf->root && tm_conf->root->id == node_id) {
                *node_type = TXGBE_TM_NODE_TYPE_PORT;
                return tm_conf->root;
        }

        TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
                if (tm_node->id == node_id) {
                        *node_type = TXGBE_TM_NODE_TYPE_TC;
                        return tm_node;
                }
        }

        TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
                if (tm_node->id == node_id) {
                        *node_type = TXGBE_TM_NODE_TYPE_QUEUE;
                        return tm_node;
                }
        }

        return NULL;
}

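/*
 * Per-level TM capabilities for the fixed three-level hierarchy:
 *
 *   port (root, 1 node)
 *     TC (non-leaf, up to TXGBE_DCB_TC_MAX nodes)
 *       queue (leaf, up to hw->mac.max_tx_queues nodes)
 */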
static int
txgbe_level_capabilities_get(struct rte_eth_dev *dev,
                             uint32_t level_id,
                             struct rte_tm_level_capabilities *cap,
                             struct rte_tm_error *error)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

        if (!cap || !error)
                return -EINVAL;

        if (level_id >= TXGBE_TM_NODE_TYPE_MAX) {
                error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
                error->message = "too deep level";
                return -EINVAL;
        }

        /* root node */
        if (level_id == TXGBE_TM_NODE_TYPE_PORT) {
                cap->n_nodes_max = 1;
                cap->n_nodes_nonleaf_max = 1;
                cap->n_nodes_leaf_max = 0;
        } else if (level_id == TXGBE_TM_NODE_TYPE_TC) {
                /* TC */
                cap->n_nodes_max = TXGBE_DCB_TC_MAX;
                cap->n_nodes_nonleaf_max = TXGBE_DCB_TC_MAX;
                cap->n_nodes_leaf_max = 0;
        } else {
                /* queue */
                cap->n_nodes_max = hw->mac.max_tx_queues;
                cap->n_nodes_nonleaf_max = 0;
                cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
        }

        cap->non_leaf_nodes_identical = true;
        cap->leaf_nodes_identical = true;

        if (level_id != TXGBE_TM_NODE_TYPE_QUEUE) {
                cap->nonleaf.shaper_private_supported = true;
                cap->nonleaf.shaper_private_dual_rate_supported = false;
                cap->nonleaf.shaper_private_rate_min = 0;
                /* 10 Gbps -> 1.25 GBps */
                cap->nonleaf.shaper_private_rate_max = 1250000000ull;
                cap->nonleaf.shaper_shared_n_max = 0;
                if (level_id == TXGBE_TM_NODE_TYPE_PORT)
                        cap->nonleaf.sched_n_children_max =
                                TXGBE_DCB_TC_MAX;
                else
                        cap->nonleaf.sched_n_children_max =
                                hw->mac.max_tx_queues;
                cap->nonleaf.sched_sp_n_priorities_max = 1;
                cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
                cap->nonleaf.sched_wfq_n_groups_max = 0;
                cap->nonleaf.sched_wfq_weight_max = 1;
                cap->nonleaf.stats_mask = 0;

                return 0;
        }

        /* queue node */
        cap->leaf.shaper_private_supported = true;
        cap->leaf.shaper_private_dual_rate_supported = false;
        cap->leaf.shaper_private_rate_min = 0;
        /* 10 Gbps -> 1.25 GBps */
        cap->leaf.shaper_private_rate_max = 1250000000ull;
        cap->leaf.shaper_shared_n_max = 0;
        cap->leaf.cman_head_drop_supported = false;
        cap->leaf.cman_wred_context_private_supported = true;
        cap->leaf.cman_wred_context_shared_n_max = 0;
        cap->leaf.stats_mask = 0;

        return 0;
}

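/*
 * Per-node TM capabilities. The node must already exist in the
 * software configuration; its level (port/TC/queue) decides whether
 * the leaf or the non-leaf capability fields are filled in.
 */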
static int
txgbe_node_capabilities_get(struct rte_eth_dev *dev,
                            uint32_t node_id,
                            struct rte_tm_node_capabilities *cap,
                            struct rte_tm_error *error)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
        struct txgbe_tm_node *tm_node;

        if (!cap || !error)
                return -EINVAL;

        if (node_id == RTE_TM_NODE_ID_NULL) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "invalid node id";
                return -EINVAL;
        }

        /* check if the node id exists */
        tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
        if (!tm_node) {
                error->type = RTE_TM_ERROR_TYPE_NODE_ID;
                error->message = "no such node";
                return -EINVAL;
        }

        cap->shaper_private_supported = true;
        cap->shaper_private_dual_rate_supported = false;
        cap->shaper_private_rate_min = 0;
        /* 10 Gbps -> 1.25 GBps */
        cap->shaper_private_rate_max = 1250000000ull;
        cap->shaper_shared_n_max = 0;

        if (node_type == TXGBE_TM_NODE_TYPE_QUEUE) {
                cap->leaf.cman_head_drop_supported = false;
                cap->leaf.cman_wred_context_private_supported = true;
                cap->leaf.cman_wred_context_shared_n_max = 0;
        } else {
                if (node_type == TXGBE_TM_NODE_TYPE_PORT)
                        cap->nonleaf.sched_n_children_max =
                                TXGBE_DCB_TC_MAX;
                else
                        cap->nonleaf.sched_n_children_max =
                                hw->mac.max_tx_queues;
                cap->nonleaf.sched_sp_n_priorities_max = 1;
                cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
                cap->nonleaf.sched_wfq_n_groups_max = 0;
                cap->nonleaf.sched_wfq_weight_max = 1;
        }

        cap->stats_mask = 0;

        return 0;
}