/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>

#include <rte_errno.h>
#include "rte_ethdev.h"
#include "rte_tm_driver.h"
#include "rte_tm.h"
/* Get generic traffic manager operations structure from a port. */
const struct rte_tm_ops *
rte_tm_ops_get(uint8_t port_id, struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_tm_ops *ops;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_tm_error_set(error,
			ENODEV,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENODEV));
		return NULL;
	}

	if ((dev->dev_ops->tm_ops_get == NULL) ||
		(dev->dev_ops->tm_ops_get(dev, &ops) != 0) ||
		(ops == NULL)) {
		rte_tm_error_set(error,
			ENOSYS,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOSYS));
		return NULL;
	}

	return ops;
}
#define RTE_TM_FUNC(port_id, func)			\
({							\
	const struct rte_tm_ops *ops =			\
		rte_tm_ops_get(port_id, error);		\
	if (ops == NULL)				\
		return -rte_errno;			\
							\
	if (ops->func == NULL)				\
		return -rte_tm_error_set(error,		\
			ENOSYS,				\
			RTE_TM_ERROR_TYPE_UNSPECIFIED,	\
			NULL,				\
			rte_strerror(ENOSYS));		\
							\
	ops->func;					\
})
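
/*
 * Note: RTE_TM_FUNC relies on the GCC/Clang statement-expression
 * extension ("({ ... })"): it validates the port and the driver
 * callback, makes the *calling* API function return a negative errno
 * value on failure, and otherwise evaluates to the callback pointer.
 * A wrapper such as rte_tm_node_type_get() below therefore expands
 * roughly to:
 *
 *	const struct rte_tm_ops *ops = rte_tm_ops_get(port_id, error);
 *	if (ops == NULL)
 *		return -rte_errno;
 *	if (ops->node_type_get == NULL)
 *		return -rte_tm_error_set(error, ENOSYS, ...);
 *	return ops->node_type_get(dev, node_id, is_leaf, error);
 */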
/* Get number of leaf nodes */
int
rte_tm_get_number_of_leaf_nodes(uint8_t port_id,
	uint32_t *n_leaf_nodes,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_tm_ops *ops =
		rte_tm_ops_get(port_id, error);

	if (ops == NULL)
		return -rte_errno;

	if (n_leaf_nodes == NULL) {
		rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));
		return -rte_errno;
	}

	*n_leaf_nodes = dev->data->nb_tx_queues;
	return 0;
}
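
/*
 * Illustrative usage (sketch; port setup and error handling omitted):
 *
 *	uint32_t n_leaf_nodes;
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_get_number_of_leaf_nodes(port_id, &n_leaf_nodes,
 *			&err) == 0)
 *		printf("port %u: %u leaf nodes (one per TX queue)\n",
 *			port_id, n_leaf_nodes);
 */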
/* Check node type (leaf or non-leaf) */
int
rte_tm_node_type_get(uint8_t port_id,
	uint32_t node_id,
	int *is_leaf,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_type_get)(dev,
		node_id, is_leaf, error);
}
/* Get capabilities */
int rte_tm_capabilities_get(uint8_t port_id,
	struct rte_tm_capabilities *cap,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, capabilities_get)(dev,
		cap, error);
}
/* Get level capabilities */
int rte_tm_level_capabilities_get(uint8_t port_id,
	uint32_t level_id,
	struct rte_tm_level_capabilities *cap,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, level_capabilities_get)(dev,
		level_id, cap, error);
}
/* Get node capabilities */
int rte_tm_node_capabilities_get(uint8_t port_id,
	uint32_t node_id,
	struct rte_tm_node_capabilities *cap,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_capabilities_get)(dev,
		node_id, cap, error);
}
/* Add WRED profile */
int rte_tm_wred_profile_add(uint8_t port_id,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, wred_profile_add)(dev,
		wred_profile_id, profile, error);
}
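
/*
 * Illustrative usage (sketch): register a WRED profile with arbitrary
 * thresholds; real values depend on queue sizes and drop policy. The
 * initializers assume the rte_tm_wred_params layout from rte_tm.h
 * (one set of RED parameters per packet color).
 *
 *	struct rte_tm_wred_params wp = {
 *		.red_params[RTE_TM_GREEN] = { .min_th = 48, .max_th = 64,
 *			.maxp_inv = 10, .wq_log2 = 9 },
 *		.red_params[RTE_TM_YELLOW] = { .min_th = 32, .max_th = 64,
 *			.maxp_inv = 10, .wq_log2 = 9 },
 *		.red_params[RTE_TM_RED] = { .min_th = 16, .max_th = 64,
 *			.maxp_inv = 10, .wq_log2 = 9 },
 *	};
 *	struct rte_tm_error err;
 *
 *	rte_tm_wred_profile_add(port_id, 0, &wp, &err);
 */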
/* Delete WRED profile */
int rte_tm_wred_profile_delete(uint8_t port_id,
	uint32_t wred_profile_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, wred_profile_delete)(dev,
		wred_profile_id, error);
}
/* Add/update shared WRED context */
int rte_tm_shared_wred_context_add_update(uint8_t port_id,
	uint32_t shared_wred_context_id,
	uint32_t wred_profile_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, shared_wred_context_add_update)(dev,
		shared_wred_context_id, wred_profile_id, error);
}
/* Delete shared WRED context */
int rte_tm_shared_wred_context_delete(uint8_t port_id,
	uint32_t shared_wred_context_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, shared_wred_context_delete)(dev,
		shared_wred_context_id, error);
}
/* Add shaper profile */
int rte_tm_shaper_profile_add(uint8_t port_id,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, shaper_profile_add)(dev,
		shaper_profile_id, profile, error);
}
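
/*
 * Illustrative usage (sketch): register a shaper profile limiting a
 * node to roughly 10 Mbps. Token bucket rates are in bytes per second
 * and the numbers are arbitrary; the layout assumed here is the
 * committed/peak token bucket pair from rte_tm.h.
 *
 *	struct rte_tm_shaper_params sp = {
 *		.committed = { .rate = 10000000 / 8, .size = 4096 },
 *		.peak = { .rate = 0, .size = 0 },
 *		.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
 *	};
 *	struct rte_tm_error err;
 *
 *	rte_tm_shaper_profile_add(port_id, 0, &sp, &err);
 */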
/* Delete shaper profile */
int rte_tm_shaper_profile_delete(uint8_t port_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, shaper_profile_delete)(dev,
		shaper_profile_id, error);
}
/* Add/update shared shaper */
int rte_tm_shared_shaper_add_update(uint8_t port_id,
	uint32_t shared_shaper_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, shared_shaper_add_update)(dev,
		shared_shaper_id, shaper_profile_id, error);
}
/* Delete shared shaper */
int rte_tm_shared_shaper_delete(uint8_t port_id,
	uint32_t shared_shaper_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, shared_shaper_delete)(dev,
		shared_shaper_id, error);
}
/* Add node to port traffic manager hierarchy */
int rte_tm_node_add(uint8_t port_id,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_add)(dev,
		node_id, parent_node_id, priority, weight, level_id,
		params, error);
}
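
/*
 * Illustrative usage (sketch): build a minimal two-level hierarchy,
 * one root node with every TX queue attached as a leaf. Per the
 * rte_tm API, leaf node IDs must be 0..N-1 (N = number of TX queues);
 * the root ID, priority and weight values here are arbitrary and full
 * node parameter setup is omitted.
 *
 *	struct rte_tm_node_params np = {
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *	};
 *	struct rte_tm_error err;
 *	uint32_t root_id = n_leaf_nodes, q;
 *
 *	rte_tm_node_add(port_id, root_id, RTE_TM_NODE_ID_NULL,
 *		0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 *	for (q = 0; q < n_leaf_nodes; q++)
 *		rte_tm_node_add(port_id, q, root_id,
 *			0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 */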
/* Delete node from traffic manager hierarchy */
int rte_tm_node_delete(uint8_t port_id,
	uint32_t node_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_delete)(dev,
		node_id, error);
}
/* Suspend node */
int rte_tm_node_suspend(uint8_t port_id,
	uint32_t node_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_suspend)(dev,
		node_id, error);
}
/* Resume node */
int rte_tm_node_resume(uint8_t port_id,
	uint32_t node_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_resume)(dev,
		node_id, error);
}
/* Commit the initial port traffic manager hierarchy */
int rte_tm_hierarchy_commit(uint8_t port_id,
	int clear_on_fail,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, hierarchy_commit)(dev,
		clear_on_fail, error);
}
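
/*
 * Illustrative usage (sketch): nodes added above only take effect once
 * the hierarchy is committed; clear_on_fail = 1 asks the driver to
 * free the half-built hierarchy if the commit is rejected.
 *
 *	struct rte_tm_error err = {0};
 *
 *	if (rte_tm_hierarchy_commit(port_id, 1, &err) != 0)
 *		printf("commit failed: %s\n",
 *			err.message ? err.message :
 *			rte_strerror(rte_errno));
 */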
/* Update node parent */
int rte_tm_node_parent_update(uint8_t port_id,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_parent_update)(dev,
		node_id, parent_node_id, priority, weight, error);
}
/* Update node private shaper */
int rte_tm_node_shaper_update(uint8_t port_id,
	uint32_t node_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_shaper_update)(dev,
		node_id, shaper_profile_id, error);
}
/* Update node shared shapers */
int rte_tm_node_shared_shaper_update(uint8_t port_id,
	uint32_t node_id,
	uint32_t shared_shaper_id,
	int add,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_shared_shaper_update)(dev,
		node_id, shared_shaper_id, add, error);
}
/* Update node stats */
int rte_tm_node_stats_update(uint8_t port_id,
	uint32_t node_id,
	uint64_t stats_mask,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_stats_update)(dev,
		node_id, stats_mask, error);
}
/* Update WFQ weight mode */
int rte_tm_node_wfq_weight_mode_update(uint8_t port_id,
	uint32_t node_id,
	int *wfq_weight_mode,
	uint32_t n_sp_priorities,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_wfq_weight_mode_update)(dev,
		node_id, wfq_weight_mode, n_sp_priorities, error);
}
/* Update node congestion management mode */
int rte_tm_node_cman_update(uint8_t port_id,
	uint32_t node_id,
	enum rte_tm_cman_mode cman,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_cman_update)(dev,
		node_id, cman, error);
}
/* Update node private WRED context */
int rte_tm_node_wred_context_update(uint8_t port_id,
	uint32_t node_id,
	uint32_t wred_profile_id,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_wred_context_update)(dev,
		node_id, wred_profile_id, error);
}
/* Update node shared WRED context */
int rte_tm_node_shared_wred_context_update(uint8_t port_id,
	uint32_t node_id,
	uint32_t shared_wred_context_id,
	int add,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_shared_wred_context_update)(dev,
		node_id, shared_wred_context_id, add, error);
}
/* Read and/or clear stats counters for specific node */
int rte_tm_node_stats_read(uint8_t port_id,
	uint32_t node_id,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, node_stats_read)(dev,
		node_id, stats, stats_mask, clear, error);
}
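
/*
 * Illustrative usage (sketch): read and clear a node's counters,
 * honoring the mask of counters the driver actually maintains.
 *
 *	struct rte_tm_node_stats stats;
 *	uint64_t mask;
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_node_stats_read(port_id, node_id, &stats, &mask,
 *			1, &err) == 0 && (mask & RTE_TM_STATS_N_PKTS))
 *		printf("node %u: %" PRIu64 " pkts\n", node_id,
 *			stats.n_pkts);
 */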
/* Packet marking - VLAN DEI */
int rte_tm_mark_vlan_dei(uint8_t port_id,
	int mark_green,
	int mark_yellow,
	int mark_red,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, mark_vlan_dei)(dev,
		mark_green, mark_yellow, mark_red, error);
}
/* Packet marking - IPv4/IPv6 ECN */
int rte_tm_mark_ip_ecn(uint8_t port_id,
	int mark_green,
	int mark_yellow,
	int mark_red,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, mark_ip_ecn)(dev,
		mark_green, mark_yellow, mark_red, error);
}
/* Packet marking - IPv4/IPv6 DSCP */
int rte_tm_mark_ip_dscp(uint8_t port_id,
	int mark_green,
	int mark_yellow,
	int mark_red,
	struct rte_tm_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	return RTE_TM_FUNC(port_id, mark_ip_dscp)(dev,
		mark_green, mark_yellow, mark_red, error);
}