1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
5 #include <rte_malloc.h>
7 #include "txgbe_ethdev.h"
9 static int txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
10 struct rte_tm_capabilities *cap,
11 struct rte_tm_error *error);
12 static int txgbe_level_capabilities_get(struct rte_eth_dev *dev,
14 struct rte_tm_level_capabilities *cap,
15 struct rte_tm_error *error);
16 static int txgbe_node_capabilities_get(struct rte_eth_dev *dev,
18 struct rte_tm_node_capabilities *cap,
19 struct rte_tm_error *error);
/* Traffic-manager (rte_tm) callback table exported to the ethdev layer.
 * Only the three capability-query callbacks are visible in this chunk;
 * additional callbacks (node add/delete, hierarchy commit, shaper profile
 * add/delete) are presumably assigned on the elided lines — TODO confirm
 * against the full file.  The closing "};" is also elided here.
 */
21 const struct rte_tm_ops txgbe_tm_ops = {
22 .capabilities_get = txgbe_tm_capabilities_get,
23 .level_capabilities_get = txgbe_level_capabilities_get,
24 .node_capabilities_get = txgbe_node_capabilities_get,
/* rte_eth_dev tm_ops_get hook: hand the static ops table back through
 * *arg.  The return type, the "void *arg" parameter line, the NULL check
 * on arg, and the return statement are elided in this chunk — only the
 * first signature line and the store are visible.
 */
28 txgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
/* publish the ops table pointer to the caller through the out-parameter */
34 *(const void **)arg = &txgbe_tm_ops;
/* Initialize the per-device TM bookkeeping: empty the shaper-profile,
 * queue-node and TC-node lists, zero the node counters, and mark the
 * hierarchy as not yet committed.  Called during device setup
 * (presumably from dev init — confirm against txgbe_ethdev.c).
 */
40 txgbe_tm_conf_init(struct rte_eth_dev *dev)
42 struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
44 /* initialize shaper profile list */
45 TAILQ_INIT(&tm_conf->shaper_profile_list);
47 /* initialize node configuration */
/* NOTE(review): the tm_conf->root initialization line appears elided here;
 * txgbe_tm_conf_uninit() frees tm_conf->root, so it must be set somewhere. */
49 TAILQ_INIT(&tm_conf->queue_list);
50 TAILQ_INIT(&tm_conf->tc_list);
51 tm_conf->nb_tc_node = 0;
52 tm_conf->nb_queue_node = 0;
/* no hierarchy has been committed to hardware yet */
53 tm_conf->committed = false;
/* Tear down the per-device TM bookkeeping: drain and free every queue
 * node, TC node, the root node, and every shaper profile.  Inverse of
 * txgbe_tm_conf_init().
 */
57 txgbe_tm_conf_uninit(struct rte_eth_dev *dev)
59 struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
60 struct txgbe_tm_shaper_profile *shaper_profile;
61 struct txgbe_tm_node *tm_node;
63 /* clear node configuration */
64 while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) {
/* NOTE(review): the rte_free(tm_node) call for each popped queue node is
 * on an elided line — the visible code only unlinks, freeing presumably
 * follows; confirm against the full file. */
65 TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
68 tm_conf->nb_queue_node = 0;
69 while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) {
/* NOTE(review): per-TC-node rte_free() likewise appears on an elided line */
70 TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
73 tm_conf->nb_tc_node = 0;
/* rte_free(NULL) is a no-op, so no guard is needed on root */
75 rte_free(tm_conf->root);
79 /* Remove all shaper profiles */
80 while ((shaper_profile =
81 TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
82 TAILQ_REMOVE(&tm_conf->shaper_profile_list,
83 shaper_profile, node);
84 rte_free(shaper_profile);
/* Return the number of traffic classes implied by the device's current
 * TX multi-queue configuration: the DCB nb_tcs for plain DCB mode, a
 * value derived from the VMDq pool count for VMDq+DCB mode (derivation
 * elided here), and presumably 1 otherwise — the else branch, the
 * nb_tcs declaration/default, and the return statement are all on
 * elided lines; confirm against the full file.
 */
89 txgbe_tc_nb_get(struct rte_eth_dev *dev)
91 struct rte_eth_conf *eth_conf;
94 eth_conf = &dev->data->dev_conf;
95 if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
96 nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
97 } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
98 if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
/* rte_tm capabilities_get callback: report the port-wide TM limits.
 * Fills *cap with the maximum capability of the hardware (3 levels:
 * port -> TC -> queue), not the currently configured topology.
 * NOTE(review): the NULL checks on cap/error and the sanity-failure
 * error path after the tc_nb comparison are on elided lines.
 */
111 txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
112 struct rte_tm_capabilities *cap,
113 struct rte_tm_error *error)
115 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
116 uint8_t tc_nb = txgbe_tc_nb_get(dev);
/* more TCs than TX queues cannot be serviced — error handling elided */
121 if (tc_nb > hw->mac.max_tx_queues)
124 error->type = RTE_TM_ERROR_TYPE_NONE;
126 /* set all the parameters to 0 first. */
127 memset(cap, 0, sizeof(struct rte_tm_capabilities));
130 * here is the max capability not the current configuration.
132 /* port + TCs + queues */
133 cap->n_nodes_max = 1 + TXGBE_DCB_TC_MAX +
134 hw->mac.max_tx_queues;
/* three levels: port, TC, queue */
135 cap->n_levels_max = 3;
136 cap->non_leaf_nodes_identical = 1;
137 cap->leaf_nodes_identical = 1;
/* every node may carry a private shaper; no shared shapers */
138 cap->shaper_n_max = cap->n_nodes_max;
139 cap->shaper_private_n_max = cap->n_nodes_max;
140 cap->shaper_private_dual_rate_n_max = 0;
141 cap->shaper_private_rate_min = 0;
142 /* 10Gbps -> 1.25GBps */
143 cap->shaper_private_rate_max = 1250000000ull;
144 cap->shaper_shared_n_max = 0;
145 cap->shaper_shared_n_nodes_per_shaper_max = 0;
146 cap->shaper_shared_n_shapers_per_node_max = 0;
147 cap->shaper_shared_dual_rate_n_max = 0;
148 cap->shaper_shared_rate_min = 0;
149 cap->shaper_shared_rate_max = 0;
150 cap->sched_n_children_max = hw->mac.max_tx_queues;
152 * HW supports SP. But no plan to support it now.
153 * So, all the nodes should have the same priority.
155 cap->sched_sp_n_priorities_max = 1;
156 cap->sched_wfq_n_children_per_group_max = 0;
157 cap->sched_wfq_n_groups_max = 0;
159 * SW only supports fair round robin now.
160 * So, all the nodes should have the same weight.
162 cap->sched_wfq_weight_max = 1;
/* no congestion management (head drop / WRED) support */
163 cap->cman_head_drop_supported = 0;
164 cap->dynamic_update_mask = 0;
165 cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
166 cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
167 cap->cman_wred_context_n_max = 0;
168 cap->cman_wred_context_private_n_max = 0;
169 cap->cman_wred_context_shared_n_max = 0;
170 cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
171 cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
/* Look up a TM node by id across the three levels (root, TC list,
 * queue list).  On a hit, *node_type is set to the matching level and
 * the node is returned; the "return tm_node" statements inside the two
 * loops and the final "return NULL" for a miss are on elided lines.
 */
177 static inline struct txgbe_tm_node *
178 txgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
179 enum txgbe_tm_node_type *node_type)
181 struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
182 struct txgbe_tm_node *tm_node;
/* root (port-level) node is held separately from the lists */
184 if (tm_conf->root && tm_conf->root->id == node_id) {
185 *node_type = TXGBE_TM_NODE_TYPE_PORT;
186 return tm_conf->root;
189 TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
190 if (tm_node->id == node_id) {
191 *node_type = TXGBE_TM_NODE_TYPE_TC;
196 TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
197 if (tm_node->id == node_id) {
198 *node_type = TXGBE_TM_NODE_TYPE_QUEUE;
/* rte_tm level_capabilities_get callback: report limits for one level of
 * the port -> TC -> queue hierarchy, selected by level_id.
 * NOTE(review): the level_id parameter line, NULL checks on cap/error,
 * the error return after the "too deep level" message, the memset of
 * *cap, and the leaf/nonleaf branch boundaries are on elided lines.
 */
207 txgbe_level_capabilities_get(struct rte_eth_dev *dev,
209 struct rte_tm_level_capabilities *cap,
210 struct rte_tm_error *error)
212 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
/* reject level ids beyond the three supported levels */
217 if (level_id >= TXGBE_TM_NODE_TYPE_MAX) {
218 error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
219 error->message = "too deep level";
/* per-level node-count limits */
224 if (level_id == TXGBE_TM_NODE_TYPE_PORT) {
225 cap->n_nodes_max = 1;
226 cap->n_nodes_nonleaf_max = 1;
227 cap->n_nodes_leaf_max = 0;
228 } else if (level_id == TXGBE_TM_NODE_TYPE_TC) {
230 cap->n_nodes_max = TXGBE_DCB_TC_MAX;
231 cap->n_nodes_nonleaf_max = TXGBE_DCB_TC_MAX;
232 cap->n_nodes_leaf_max = 0;
/* queue level: all nodes are leaves */
235 cap->n_nodes_max = hw->mac.max_tx_queues;
236 cap->n_nodes_nonleaf_max = 0;
237 cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
240 cap->non_leaf_nodes_identical = true;
241 cap->leaf_nodes_identical = true;
/* non-leaf (port/TC) capabilities */
243 if (level_id != TXGBE_TM_NODE_TYPE_QUEUE) {
244 cap->nonleaf.shaper_private_supported = true;
245 cap->nonleaf.shaper_private_dual_rate_supported = false;
246 cap->nonleaf.shaper_private_rate_min = 0;
247 /* 10Gbps -> 1.25GBps */
248 cap->nonleaf.shaper_private_rate_max = 1250000000ull;
249 cap->nonleaf.shaper_shared_n_max = 0;
/* port's children are TCs (count elided); TC's children are queues */
250 if (level_id == TXGBE_TM_NODE_TYPE_PORT)
251 cap->nonleaf.sched_n_children_max =
254 cap->nonleaf.sched_n_children_max =
255 hw->mac.max_tx_queues;
256 cap->nonleaf.sched_sp_n_priorities_max = 1;
257 cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
258 cap->nonleaf.sched_wfq_n_groups_max = 0;
259 cap->nonleaf.sched_wfq_weight_max = 1;
260 cap->nonleaf.stats_mask = 0;
/* leaf (queue) capabilities — the early return that skips this for
 * non-queue levels is on an elided line */
266 cap->leaf.shaper_private_supported = true;
267 cap->leaf.shaper_private_dual_rate_supported = false;
268 cap->leaf.shaper_private_rate_min = 0;
269 /* 10Gbps -> 1.25GBps */
270 cap->leaf.shaper_private_rate_max = 1250000000ull;
271 cap->leaf.shaper_shared_n_max = 0;
272 cap->leaf.cman_head_drop_supported = false;
273 cap->leaf.cman_wred_context_private_supported = true;
274 cap->leaf.cman_wred_context_shared_n_max = 0;
275 cap->leaf.stats_mask = 0;
/* rte_tm node_capabilities_get callback: report limits for one existing
 * node, identified by node_id.  The node must already exist in the SW
 * hierarchy (looked up via txgbe_tm_node_search()).
 * NOTE(review): the node_id parameter line, NULL checks, the error
 * returns, the memset of *cap, and the tail of the function (beyond the
 * last visible assignment) are elided in this chunk.
 */
281 txgbe_node_capabilities_get(struct rte_eth_dev *dev,
283 struct rte_tm_node_capabilities *cap,
284 struct rte_tm_error *error)
286 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
287 enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
288 struct txgbe_tm_node *tm_node;
293 if (node_id == RTE_TM_NODE_ID_NULL) {
294 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
295 error->message = "invalid node id";
299 /* check if the node id exists */
300 tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
/* the "if (!tm_node)" guard line is elided before this error fill-in */
302 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
303 error->message = "no such node";
/* shaper limits common to every node */
307 cap->shaper_private_supported = true;
308 cap->shaper_private_dual_rate_supported = false;
309 cap->shaper_private_rate_min = 0;
310 /* 10Gbps -> 1.25GBps */
311 cap->shaper_private_rate_max = 1250000000ull;
312 cap->shaper_shared_n_max = 0;
/* leaf (queue) vs non-leaf (port/TC) specifics */
314 if (node_type == TXGBE_TM_NODE_TYPE_QUEUE) {
315 cap->leaf.cman_head_drop_supported = false;
316 cap->leaf.cman_wred_context_private_supported = true;
317 cap->leaf.cman_wred_context_shared_n_max = 0;
/* port's child count is on an elided line; TC's children are queues */
319 if (node_type == TXGBE_TM_NODE_TYPE_PORT)
320 cap->nonleaf.sched_n_children_max =
323 cap->nonleaf.sched_n_children_max =
324 hw->mac.max_tx_queues;
325 cap->nonleaf.sched_sp_n_priorities_max = 1;
326 cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
327 cap->nonleaf.sched_wfq_n_groups_max = 0;
328 cap->nonleaf.sched_wfq_weight_max = 1;