/* lib/librte_ethdev/rte_tm.c — DPDK generic Traffic Management (TM) API layer */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4
5 #include <stdint.h>
6
7 #include <rte_errno.h>
8 #include "rte_ethdev.h"
9 #include "rte_tm_driver.h"
10 #include "rte_tm.h"
11
12 /* Get generic traffic manager operations structure from a port. */
13 const struct rte_tm_ops *
14 rte_tm_ops_get(uint16_t port_id, struct rte_tm_error *error)
15 {
16         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
17         const struct rte_tm_ops *ops;
18
19         if (!rte_eth_dev_is_valid_port(port_id)) {
20                 rte_tm_error_set(error,
21                         ENODEV,
22                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
23                         NULL,
24                         rte_strerror(ENODEV));
25                 return NULL;
26         }
27
28         if ((dev->dev_ops->tm_ops_get == NULL) ||
29                 (dev->dev_ops->tm_ops_get(dev, &ops) != 0) ||
30                 (ops == NULL)) {
31                 rte_tm_error_set(error,
32                         ENOSYS,
33                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
34                         NULL,
35                         rte_strerror(ENOSYS));
36                 return NULL;
37         }
38
39         return ops;
40 }
41
/*
 * Look up the named TM callback for a port — or bail out of the CALLING
 * function on error.
 *
 * This is a GCC/Clang statement expression containing two early `return`
 * statements: the enclosing function returns -rte_errno when the port has
 * no TM ops, and the negated rte_tm_error_set() result (ENOSYS) when the
 * specific callback is NULL.  On success the expression evaluates to the
 * callback pointer, hence the usage pattern:
 *     return RTE_TM_FUNC(port_id, foo)(dev, ..., error);
 * Requires a variable named `error` in the caller's scope.
 */
#define RTE_TM_FUNC(port_id, func)                              \
({                                                      \
	const struct rte_tm_ops *ops =                  \
		rte_tm_ops_get(port_id, error);         \
	if (ops == NULL)                                        \
		return -rte_errno;                      \
							\
	if (ops->func == NULL)                          \
		return -rte_tm_error_set(error,         \
			ENOSYS,                         \
			RTE_TM_ERROR_TYPE_UNSPECIFIED,  \
			NULL,                           \
			rte_strerror(ENOSYS));          \
							\
	ops->func;                                      \
})
58
59 /* Get number of leaf nodes */
60 int
61 rte_tm_get_number_of_leaf_nodes(uint16_t port_id,
62         uint32_t *n_leaf_nodes,
63         struct rte_tm_error *error)
64 {
65         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
66         const struct rte_tm_ops *ops =
67                 rte_tm_ops_get(port_id, error);
68
69         if (ops == NULL)
70                 return -rte_errno;
71
72         if (n_leaf_nodes == NULL) {
73                 rte_tm_error_set(error,
74                         EINVAL,
75                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
76                         NULL,
77                         rte_strerror(EINVAL));
78                 return -rte_errno;
79         }
80
81         *n_leaf_nodes = dev->data->nb_tx_queues;
82         return 0;
83 }
84
/*
 * Check node type (leaf or non-leaf); result stored in *is_leaf.
 * Dispatches to the driver's node_type_get callback.  RTE_TM_FUNC()
 * makes this function return -rte_errno / -ENOSYS early when the port
 * has no TM ops or the callback is not implemented.
 */
int
rte_tm_node_type_get(uint16_t port_id,
	uint32_t node_id,
	int *is_leaf,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_type_get)(dev,
		node_id, is_leaf, error);
}
96
/*
 * Get port-level TM capabilities into *cap.
 * Thin dispatch to the driver's capabilities_get callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_capabilities_get(uint16_t port_id,
	struct rte_tm_capabilities *cap,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, capabilities_get)(dev,
		cap, error);
}
106
/*
 * Get TM capabilities for one hierarchy level into *cap.
 * Thin dispatch to the driver's level_capabilities_get callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_level_capabilities_get(uint16_t port_id,
	uint32_t level_id,
	struct rte_tm_level_capabilities *cap,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, level_capabilities_get)(dev,
		level_id, cap, error);
}
117
/*
 * Get TM capabilities of one hierarchy node into *cap.
 * Thin dispatch to the driver's node_capabilities_get callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_node_capabilities_get(uint16_t port_id,
	uint32_t node_id,
	struct rte_tm_node_capabilities *cap,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_capabilities_get)(dev,
		node_id, cap, error);
}
128
/*
 * Add a WRED profile with the given id and parameters.
 * Thin dispatch to the driver's wred_profile_add callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_wred_profile_add(uint16_t port_id,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, wred_profile_add)(dev,
		wred_profile_id, profile, error);
}
139
/*
 * Delete the WRED profile identified by wred_profile_id.
 * Thin dispatch to the driver's wred_profile_delete callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_wred_profile_delete(uint16_t port_id,
	uint32_t wred_profile_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, wred_profile_delete)(dev,
		wred_profile_id, error);
}
149
/*
 * Add a shared WRED context, or update an existing one to point at a
 * different WRED profile.  Thin dispatch to the driver's
 * shared_wred_context_add_update callback via RTE_TM_FUNC().
 */
int rte_tm_shared_wred_context_add_update(uint16_t port_id,
	uint32_t shared_wred_context_id,
	uint32_t wred_profile_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, shared_wred_context_add_update)(dev,
		shared_wred_context_id, wred_profile_id, error);
}
160
/*
 * Delete the shared WRED context identified by shared_wred_context_id.
 * Thin dispatch to the driver's shared_wred_context_delete callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_shared_wred_context_delete(uint16_t port_id,
	uint32_t shared_wred_context_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, shared_wred_context_delete)(dev,
		shared_wred_context_id, error);
}
170
/*
 * Add a shaper profile with the given id and parameters.
 * Thin dispatch to the driver's shaper_profile_add callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_shaper_profile_add(uint16_t port_id,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, shaper_profile_add)(dev,
		shaper_profile_id, profile, error);
}
181
/*
 * Delete the shaper profile identified by shaper_profile_id.
 * (The original comment said "WRED profile" — copy-paste error.)
 * Thin dispatch to the driver's shaper_profile_delete callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_shaper_profile_delete(uint16_t port_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, shaper_profile_delete)(dev,
		shaper_profile_id, error);
}
191
/*
 * Add a shared shaper, or update an existing one to use a different
 * shaper profile.  Thin dispatch to the driver's
 * shared_shaper_add_update callback via RTE_TM_FUNC().
 */
int rte_tm_shared_shaper_add_update(uint16_t port_id,
	uint32_t shared_shaper_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, shared_shaper_add_update)(dev,
		shared_shaper_id, shaper_profile_id, error);
}
202
/*
 * Delete the shared shaper identified by shared_shaper_id.
 * Thin dispatch to the driver's shared_shaper_delete callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_shared_shaper_delete(uint16_t port_id,
	uint32_t shared_shaper_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, shared_shaper_delete)(dev,
		shared_shaper_id, error);
}
212
/*
 * Add a node to the port's TM hierarchy under parent_node_id, with the
 * given scheduling priority/weight, level hint and node parameters.
 * Thin dispatch to the driver's node_add callback via RTE_TM_FUNC(),
 * which returns early on missing TM ops or callback.
 */
int rte_tm_node_add(uint16_t port_id,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_add)(dev,
		node_id, parent_node_id, priority, weight, level_id,
		params, error);
}
228
229 /* Delete node from traffic manager hierarchy */
230 int rte_tm_node_delete(uint16_t port_id,
231         uint32_t node_id,
232         struct rte_tm_error *error)
233 {
234         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
235         return RTE_TM_FUNC(port_id, node_delete)(dev,
236                 node_id, error);
237 }
238
/*
 * Suspend the given hierarchy node.
 * Thin dispatch to the driver's node_suspend callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_node_suspend(uint16_t port_id,
	uint32_t node_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_suspend)(dev,
		node_id, error);
}
248
/*
 * Resume a previously suspended hierarchy node.
 * Thin dispatch to the driver's node_resume callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_node_resume(uint16_t port_id,
	uint32_t node_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_resume)(dev,
		node_id, error);
}
258
259 /* Commit the initial port traffic manager hierarchy */
260 int rte_tm_hierarchy_commit(uint16_t port_id,
261         int clear_on_fail,
262         struct rte_tm_error *error)
263 {
264         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
265         return RTE_TM_FUNC(port_id, hierarchy_commit)(dev,
266                 clear_on_fail, error);
267 }
268
/*
 * Move a node under a new parent and/or update its priority and weight.
 * Thin dispatch to the driver's node_parent_update callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_node_parent_update(uint16_t port_id,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_parent_update)(dev,
		node_id, parent_node_id, priority, weight, error);
}
281
/*
 * Point a node's private shaper at a different shaper profile.
 * Thin dispatch to the driver's node_shaper_update callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_node_shaper_update(uint16_t port_id,
	uint32_t node_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_shaper_update)(dev,
		node_id, shaper_profile_id, error);
}
292
/*
 * Attach (add != 0) or detach (add == 0) a shared shaper to/from a node.
 * Thin dispatch to the driver's node_shared_shaper_update callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_node_shared_shaper_update(uint16_t port_id,
	uint32_t node_id,
	uint32_t shared_shaper_id,
	int add,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_shared_shaper_update)(dev,
		node_id, shared_shaper_id, add, error);
}
304
/*
 * Select which stats counters (stats_mask bits) are enabled for a node.
 * Thin dispatch to the driver's node_stats_update callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_node_stats_update(uint16_t port_id,
	uint32_t node_id,
	uint64_t stats_mask,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_stats_update)(dev,
		node_id, stats_mask, error);
}
315
/*
 * Update a node's WFQ weight mode, one entry per SP priority
 * (wfq_weight_mode array of n_sp_priorities elements).
 * Thin dispatch to the driver's node_wfq_weight_mode_update callback
 * via RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_node_wfq_weight_mode_update(uint16_t port_id,
	uint32_t node_id,
	int *wfq_weight_mode,
	uint32_t n_sp_priorities,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_wfq_weight_mode_update)(dev,
		node_id, wfq_weight_mode, n_sp_priorities, error);
}
327
/*
 * Set a node's congestion management mode (see enum rte_tm_cman_mode).
 * Thin dispatch to the driver's node_cman_update callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_node_cman_update(uint16_t port_id,
	uint32_t node_id,
	enum rte_tm_cman_mode cman,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_cman_update)(dev,
		node_id, cman, error);
}
338
/*
 * Point a node's private WRED context at a different WRED profile.
 * Thin dispatch to the driver's node_wred_context_update callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_node_wred_context_update(uint16_t port_id,
	uint32_t node_id,
	uint32_t wred_profile_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_wred_context_update)(dev,
		node_id, wred_profile_id, error);
}
349
/*
 * Attach (add != 0) or detach (add == 0) a shared WRED context
 * to/from a node.  Thin dispatch to the driver's
 * node_shared_wred_context_update callback via RTE_TM_FUNC().
 */
int rte_tm_node_shared_wred_context_update(uint16_t port_id,
	uint32_t node_id,
	uint32_t shared_wred_context_id,
	int add,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_shared_wred_context_update)(dev,
		node_id, shared_wred_context_id, add, error);
}
361
/*
 * Read (and optionally clear, when clear != 0) a node's stats counters;
 * *stats_mask reports which counters the driver actually supports.
 * Thin dispatch to the driver's node_stats_read callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_node_stats_read(uint16_t port_id,
	uint32_t node_id,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_stats_read)(dev,
		node_id, stats, stats_mask, clear, error);
}
374
/*
 * Enable/disable VLAN DEI packet marking per color (green/yellow/red).
 * Thin dispatch to the driver's mark_vlan_dei callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_mark_vlan_dei(uint16_t port_id,
	int mark_green,
	int mark_yellow,
	int mark_red,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, mark_vlan_dei)(dev,
		mark_green, mark_yellow, mark_red, error);
}
386
/*
 * Enable/disable IPv4/IPv6 ECN packet marking per color.
 * Thin dispatch to the driver's mark_ip_ecn callback via
 * RTE_TM_FUNC(), which returns early on missing TM ops or callback.
 */
int rte_tm_mark_ip_ecn(uint16_t port_id,
	int mark_green,
	int mark_yellow,
	int mark_red,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, mark_ip_ecn)(dev,
		mark_green, mark_yellow, mark_red, error);
}
398
399 /* Packet marking - IPv4/IPv6 DSCP */
400 int rte_tm_mark_ip_dscp(uint16_t port_id,
401         int mark_green,
402         int mark_yellow,
403         int mark_red,
404         struct rte_tm_error *error)
405 {
406         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
407         return RTE_TM_FUNC(port_id, mark_ip_dscp)(dev,
408                 mark_green, mark_yellow, mark_red, error);
409 }