/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <stdint.h>

#include "rte_ethdev.h"
#include "rte_tm_driver.h"
12 /* Get generic traffic manager operations structure from a port. */
13 const struct rte_tm_ops *
14 rte_tm_ops_get(uint16_t port_id, struct rte_tm_error *error)
16 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
17 const struct rte_tm_ops *ops;
19 if (!rte_eth_dev_is_valid_port(port_id)) {
20 rte_tm_error_set(error,
22 RTE_TM_ERROR_TYPE_UNSPECIFIED,
24 rte_strerror(ENODEV));
28 if ((dev->dev_ops->tm_ops_get == NULL) ||
29 (dev->dev_ops->tm_ops_get(dev, &ops) != 0) ||
31 rte_tm_error_set(error,
33 RTE_TM_ERROR_TYPE_UNSPECIFIED,
35 rte_strerror(ENOSYS));
42 #define RTE_TM_FUNC(port_id, func) \
44 const struct rte_tm_ops *ops = \
45 rte_tm_ops_get(port_id, error); \
49 if (ops->func == NULL) \
50 return -rte_tm_error_set(error, \
52 RTE_TM_ERROR_TYPE_UNSPECIFIED, \
54 rte_strerror(ENOSYS)); \
59 /* Get number of leaf nodes */
61 rte_tm_get_number_of_leaf_nodes(uint16_t port_id,
62 uint32_t *n_leaf_nodes,
63 struct rte_tm_error *error)
65 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
66 const struct rte_tm_ops *ops =
67 rte_tm_ops_get(port_id, error);
72 if (n_leaf_nodes == NULL) {
73 rte_tm_error_set(error,
75 RTE_TM_ERROR_TYPE_UNSPECIFIED,
77 rte_strerror(EINVAL));
81 *n_leaf_nodes = dev->data->nb_tx_queues;
85 /* Check node type (leaf or non-leaf) */
87 rte_tm_node_type_get(uint16_t port_id,
90 struct rte_tm_error *error)
92 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
93 return RTE_TM_FUNC(port_id, node_type_get)(dev,
94 node_id, is_leaf, error);
97 /* Get capabilities */
98 int rte_tm_capabilities_get(uint16_t port_id,
99 struct rte_tm_capabilities *cap,
100 struct rte_tm_error *error)
102 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
103 return RTE_TM_FUNC(port_id, capabilities_get)(dev,
107 /* Get level capabilities */
108 int rte_tm_level_capabilities_get(uint16_t port_id,
110 struct rte_tm_level_capabilities *cap,
111 struct rte_tm_error *error)
113 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
114 return RTE_TM_FUNC(port_id, level_capabilities_get)(dev,
115 level_id, cap, error);
118 /* Get node capabilities */
119 int rte_tm_node_capabilities_get(uint16_t port_id,
121 struct rte_tm_node_capabilities *cap,
122 struct rte_tm_error *error)
124 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
125 return RTE_TM_FUNC(port_id, node_capabilities_get)(dev,
126 node_id, cap, error);
129 /* Add WRED profile */
130 int rte_tm_wred_profile_add(uint16_t port_id,
131 uint32_t wred_profile_id,
132 struct rte_tm_wred_params *profile,
133 struct rte_tm_error *error)
135 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
136 return RTE_TM_FUNC(port_id, wred_profile_add)(dev,
137 wred_profile_id, profile, error);
140 /* Delete WRED profile */
141 int rte_tm_wred_profile_delete(uint16_t port_id,
142 uint32_t wred_profile_id,
143 struct rte_tm_error *error)
145 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
146 return RTE_TM_FUNC(port_id, wred_profile_delete)(dev,
147 wred_profile_id, error);
150 /* Add/update shared WRED context */
151 int rte_tm_shared_wred_context_add_update(uint16_t port_id,
152 uint32_t shared_wred_context_id,
153 uint32_t wred_profile_id,
154 struct rte_tm_error *error)
156 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
157 return RTE_TM_FUNC(port_id, shared_wred_context_add_update)(dev,
158 shared_wred_context_id, wred_profile_id, error);
161 /* Delete shared WRED context */
162 int rte_tm_shared_wred_context_delete(uint16_t port_id,
163 uint32_t shared_wred_context_id,
164 struct rte_tm_error *error)
166 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
167 return RTE_TM_FUNC(port_id, shared_wred_context_delete)(dev,
168 shared_wred_context_id, error);
171 /* Add shaper profile */
172 int rte_tm_shaper_profile_add(uint16_t port_id,
173 uint32_t shaper_profile_id,
174 struct rte_tm_shaper_params *profile,
175 struct rte_tm_error *error)
177 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
178 return RTE_TM_FUNC(port_id, shaper_profile_add)(dev,
179 shaper_profile_id, profile, error);
182 /* Delete WRED profile */
183 int rte_tm_shaper_profile_delete(uint16_t port_id,
184 uint32_t shaper_profile_id,
185 struct rte_tm_error *error)
187 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
188 return RTE_TM_FUNC(port_id, shaper_profile_delete)(dev,
189 shaper_profile_id, error);
192 /* Add shared shaper */
193 int rte_tm_shared_shaper_add_update(uint16_t port_id,
194 uint32_t shared_shaper_id,
195 uint32_t shaper_profile_id,
196 struct rte_tm_error *error)
198 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
199 return RTE_TM_FUNC(port_id, shared_shaper_add_update)(dev,
200 shared_shaper_id, shaper_profile_id, error);
203 /* Delete shared shaper */
204 int rte_tm_shared_shaper_delete(uint16_t port_id,
205 uint32_t shared_shaper_id,
206 struct rte_tm_error *error)
208 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
209 return RTE_TM_FUNC(port_id, shared_shaper_delete)(dev,
210 shared_shaper_id, error);
213 /* Add node to port traffic manager hierarchy */
214 int rte_tm_node_add(uint16_t port_id,
216 uint32_t parent_node_id,
220 struct rte_tm_node_params *params,
221 struct rte_tm_error *error)
223 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
224 return RTE_TM_FUNC(port_id, node_add)(dev,
225 node_id, parent_node_id, priority, weight, level_id,
229 /* Delete node from traffic manager hierarchy */
230 int rte_tm_node_delete(uint16_t port_id,
232 struct rte_tm_error *error)
234 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
235 return RTE_TM_FUNC(port_id, node_delete)(dev,
240 int rte_tm_node_suspend(uint16_t port_id,
242 struct rte_tm_error *error)
244 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
245 return RTE_TM_FUNC(port_id, node_suspend)(dev,
250 int rte_tm_node_resume(uint16_t port_id,
252 struct rte_tm_error *error)
254 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
255 return RTE_TM_FUNC(port_id, node_resume)(dev,
259 /* Commit the initial port traffic manager hierarchy */
260 int rte_tm_hierarchy_commit(uint16_t port_id,
262 struct rte_tm_error *error)
264 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
265 return RTE_TM_FUNC(port_id, hierarchy_commit)(dev,
266 clear_on_fail, error);
269 /* Update node parent */
270 int rte_tm_node_parent_update(uint16_t port_id,
272 uint32_t parent_node_id,
275 struct rte_tm_error *error)
277 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
278 return RTE_TM_FUNC(port_id, node_parent_update)(dev,
279 node_id, parent_node_id, priority, weight, error);
282 /* Update node private shaper */
283 int rte_tm_node_shaper_update(uint16_t port_id,
285 uint32_t shaper_profile_id,
286 struct rte_tm_error *error)
288 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
289 return RTE_TM_FUNC(port_id, node_shaper_update)(dev,
290 node_id, shaper_profile_id, error);
293 /* Update node shared shapers */
294 int rte_tm_node_shared_shaper_update(uint16_t port_id,
296 uint32_t shared_shaper_id,
298 struct rte_tm_error *error)
300 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
301 return RTE_TM_FUNC(port_id, node_shared_shaper_update)(dev,
302 node_id, shared_shaper_id, add, error);
305 /* Update node stats */
306 int rte_tm_node_stats_update(uint16_t port_id,
309 struct rte_tm_error *error)
311 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
312 return RTE_TM_FUNC(port_id, node_stats_update)(dev,
313 node_id, stats_mask, error);
316 /* Update WFQ weight mode */
317 int rte_tm_node_wfq_weight_mode_update(uint16_t port_id,
319 int *wfq_weight_mode,
320 uint32_t n_sp_priorities,
321 struct rte_tm_error *error)
323 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
324 return RTE_TM_FUNC(port_id, node_wfq_weight_mode_update)(dev,
325 node_id, wfq_weight_mode, n_sp_priorities, error);
328 /* Update node congestion management mode */
329 int rte_tm_node_cman_update(uint16_t port_id,
331 enum rte_tm_cman_mode cman,
332 struct rte_tm_error *error)
334 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
335 return RTE_TM_FUNC(port_id, node_cman_update)(dev,
336 node_id, cman, error);
339 /* Update node private WRED context */
340 int rte_tm_node_wred_context_update(uint16_t port_id,
342 uint32_t wred_profile_id,
343 struct rte_tm_error *error)
345 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
346 return RTE_TM_FUNC(port_id, node_wred_context_update)(dev,
347 node_id, wred_profile_id, error);
350 /* Update node shared WRED context */
351 int rte_tm_node_shared_wred_context_update(uint16_t port_id,
353 uint32_t shared_wred_context_id,
355 struct rte_tm_error *error)
357 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
358 return RTE_TM_FUNC(port_id, node_shared_wred_context_update)(dev,
359 node_id, shared_wred_context_id, add, error);
362 /* Read and/or clear stats counters for specific node */
363 int rte_tm_node_stats_read(uint16_t port_id,
365 struct rte_tm_node_stats *stats,
366 uint64_t *stats_mask,
368 struct rte_tm_error *error)
370 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
371 return RTE_TM_FUNC(port_id, node_stats_read)(dev,
372 node_id, stats, stats_mask, clear, error);
375 /* Packet marking - VLAN DEI */
376 int rte_tm_mark_vlan_dei(uint16_t port_id,
380 struct rte_tm_error *error)
382 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
383 return RTE_TM_FUNC(port_id, mark_vlan_dei)(dev,
384 mark_green, mark_yellow, mark_red, error);
387 /* Packet marking - IPv4/IPv6 ECN */
388 int rte_tm_mark_ip_ecn(uint16_t port_id,
392 struct rte_tm_error *error)
394 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
395 return RTE_TM_FUNC(port_id, mark_ip_ecn)(dev,
396 mark_green, mark_yellow, mark_red, error);
399 /* Packet marking - IPv4/IPv6 DSCP */
400 int rte_tm_mark_ip_dscp(uint16_t port_id,
404 struct rte_tm_error *error)
406 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
407 return RTE_TM_FUNC(port_id, mark_ip_dscp)(dev,
408 mark_green, mark_yellow, mark_red, error);