/* SPDX-License-Identifier: BSD-3-Clause */
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_tm_driver.h>

#include "dpaa2_ethdev.h"
#define DPAA2_BURST_MAX	(64 * 1024)

#define DPAA2_SHAPER_MIN_RATE 0
#define DPAA2_SHAPER_MAX_RATE 107374182400ull
#define DPAA2_WEIGHT_MAX 24701
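
/*
 * Note: rte_tm expresses shaper rates in bytes per second and burst
 * sizes in bytes, so DPAA2_BURST_MAX caps a burst at 64 KiB. Weights in
 * the range 1..DPAA2_WEIGHT_MAX are remapped onto the DPNI scheduler's
 * delta_bandwidth field as weight + 99 in dpaa2_hierarchy_commit().
 */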
int
dpaa2_tm_init(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	LIST_INIT(&priv->shaper_profiles);
	LIST_INIT(&priv->nodes);

	return 0;
}
void dpaa2_tm_deinit(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_shaper_profile *profile =
		LIST_FIRST(&priv->shaper_profiles);
	struct dpaa2_tm_node *node = LIST_FIRST(&priv->nodes);

	while (profile) {
		struct dpaa2_tm_shaper_profile *next = LIST_NEXT(profile, next);

		LIST_REMOVE(profile, next);
		rte_free(profile);
		profile = next;
	}

	while (node) {
		struct dpaa2_tm_node *next = LIST_NEXT(node, next);

		LIST_REMOVE(node, next);
		rte_free(node);
		node = next;
	}
}
static struct dpaa2_tm_node *
dpaa2_node_from_id(struct dpaa2_dev_priv *priv, uint32_t node_id)
{
	struct dpaa2_tm_node *node;

	LIST_FOREACH(node, &priv->nodes, next)
		if (node->id == node_id)
			return node;

	return NULL;
}
static int
dpaa2_capabilities_get(struct rte_eth_dev *dev,
		       struct rte_tm_capabilities *cap,
		       struct rte_tm_error *error)
{
	if (!cap)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, "Capabilities are NULL\n");

	memset(cap, 0, sizeof(*cap));

	/* Root node (port) plus one node per TX queue, assuming each
	 * TX queue is mapped to one traffic class.
	 */
	cap->n_nodes_max = 1 + dev->data->nb_tx_queues;
	cap->n_levels_max = 2; /* port level + txqs level */
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	cap->shaper_n_max = 1;
	cap->shaper_private_n_max = 1;
	cap->shaper_private_dual_rate_n_max = 1;
	cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
	cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;

	cap->sched_n_children_max = dev->data->nb_tx_queues;
	cap->sched_sp_n_priorities_max = dev->data->nb_tx_queues;
	cap->sched_wfq_n_children_per_group_max = dev->data->nb_tx_queues;
	cap->sched_wfq_n_groups_max = 2;
	cap->sched_wfq_weight_max = DPAA2_WEIGHT_MAX;

	cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_STATS;
	cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;

	return 0;
}
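
/*
 * Application-side sketch (illustrative only, not part of this driver):
 * querying the above through the generic rte_tm API. port_id is assumed
 * to be a configured dpaa2 port; error handling is elided.
 *
 *	struct rte_tm_capabilities cap;
 *	struct rte_tm_error tm_err;
 *
 *	if (rte_tm_capabilities_get(port_id, &cap, &tm_err) == 0)
 *		printf("nodes: %u, levels: %u\n",
 *		       cap.n_nodes_max, cap.n_levels_max);
 */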
static int
dpaa2_level_capabilities_get(struct rte_eth_dev *dev,
			     uint32_t level_id,
			     struct rte_tm_level_capabilities *cap,
			     struct rte_tm_error *error)
{
	if (!cap)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	memset(cap, 0, sizeof(*cap));

	if (level_id > 1)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_LEVEL_ID,
					 NULL, "Wrong level id\n");

	if (level_id == 0) { /* Root node */
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->non_leaf_nodes_identical = 1;

		cap->nonleaf.shaper_private_supported = 1;
		cap->nonleaf.shaper_private_dual_rate_supported = 1;
		cap->nonleaf.shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
		cap->nonleaf.shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;

		cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			dev->data->nb_tx_queues;
		cap->nonleaf.sched_wfq_n_groups_max = 2;
		cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
		cap->nonleaf.stats_mask = RTE_TM_STATS_N_PKTS |
					  RTE_TM_STATS_N_BYTES;
	} else { /* leaf nodes */
		cap->n_nodes_max = dev->data->nb_tx_queues;
		cap->n_nodes_leaf_max = dev->data->nb_tx_queues;
		cap->leaf_nodes_identical = 1;

		cap->leaf.stats_mask = RTE_TM_STATS_N_PKTS;
	}

	return 0;
}
static int
dpaa2_node_capabilities_get(struct rte_eth_dev *dev, uint32_t node_id,
			    struct rte_tm_node_capabilities *cap,
			    struct rte_tm_error *error)
{
	struct dpaa2_tm_node *node;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	if (!cap)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	memset(cap, 0, sizeof(*cap));

	node = dpaa2_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id does not exist\n");

	if (node->type == 0) { /* root node (port) */
		cap->shaper_private_supported = 1;

		cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			dev->data->nb_tx_queues;
		cap->nonleaf.sched_wfq_n_groups_max = 2;
		cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
		cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
	} else { /* leaf node (queue) */
		cap->stats_mask = RTE_TM_STATS_N_PKTS;
	}

	return 0;
}
static int
dpaa2_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, int *is_leaf,
		    struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_node *node;

	if (!is_leaf)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	node = dpaa2_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id does not exist\n");

	*is_leaf = node->type == 1/*NODE_QUEUE*/ ? 1 : 0;

	return 0;
}
static struct dpaa2_tm_shaper_profile *
dpaa2_shaper_profile_from_id(struct dpaa2_dev_priv *priv,
			     uint32_t shaper_profile_id)
{
	struct dpaa2_tm_shaper_profile *profile;

	LIST_FOREACH(profile, &priv->shaper_profiles, next)
		if (profile->id == shaper_profile_id)
			return profile;

	return NULL;
}
static int
dpaa2_shaper_profile_add(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
			 struct rte_tm_shaper_params *params,
			 struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_shaper_profile *profile;

	if (!params)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	if (params->committed.rate > DPAA2_SHAPER_MAX_RATE)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
			NULL, "Committed rate is out of range\n");

	if (params->committed.size > DPAA2_BURST_MAX)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE,
			NULL, "Committed size is out of range\n");

	if (params->peak.rate > DPAA2_SHAPER_MAX_RATE)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
			NULL, "Peak rate is out of range\n");

	if (params->peak.size > DPAA2_BURST_MAX)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
			NULL, "Peak size is out of range\n");

	if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					 NULL, "Wrong shaper profile id\n");

	profile = dpaa2_shaper_profile_from_id(priv, shaper_profile_id);
	if (profile)
		return -rte_tm_error_set(error, EEXIST,
					 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					 NULL, "Profile id already exists\n");

	profile = rte_zmalloc_socket(NULL, sizeof(*profile), 0,
				     rte_socket_id());
	if (!profile)
		return -rte_tm_error_set(error, ENOMEM,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	profile->id = shaper_profile_id;
	rte_memcpy(&profile->params, params, sizeof(profile->params));

	LIST_INSERT_HEAD(&priv->shaper_profiles, profile, next);

	return 0;
}
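
/*
 * Application-side sketch (illustrative only): registering a dual-rate
 * profile this callback accepts. Rates are bytes per second, sizes are
 * bytes; profile id 1 is an arbitrary choice.
 *
 *	struct rte_tm_shaper_params sp = {
 *		.committed = { .rate = 12500000, .size = 4096 },
 *		.peak = { .rate = 25000000, .size = 8192 },
 *	};
 *	struct rte_tm_error tm_err;
 *
 *	rte_tm_shaper_profile_add(port_id, 1, &sp, &tm_err);
 */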
static int
dpaa2_shaper_profile_delete(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
			    struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_shaper_profile *profile;

	profile = dpaa2_shaper_profile_from_id(priv, shaper_profile_id);
	if (!profile)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					 NULL, "Profile id does not exist\n");

	if (profile->refcnt)
		return -rte_tm_error_set(error, EPERM,
					 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					 NULL, "Profile is used\n");

	LIST_REMOVE(profile, next);
	rte_free(profile);

	return 0;
}
static int
dpaa2_node_check_params(struct rte_eth_dev *dev, uint32_t node_id,
			__rte_unused uint32_t priority, uint32_t weight,
			uint32_t level_id,
			struct rte_tm_node_params *params,
			struct rte_tm_error *error)
{
	if (node_id == RTE_TM_NODE_ID_NULL)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id is invalid\n");

	if (weight > DPAA2_WEIGHT_MAX)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
					 NULL, "Weight is out of range\n");

	if (level_id != 0 && level_id != 1)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_LEVEL_ID,
					 NULL, "Wrong level id\n");

	if (!params)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	if (params->shared_shaper_id)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
			NULL, "Shared shaper is not supported\n");

	if (params->n_shared_shapers)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL, "Shared shaper is not supported\n");

	/* verify port (root node) settings */
	if (node_id >= dev->data->nb_tx_queues) { /* non-leaf node */
		if (params->nonleaf.wfq_weight_mode)
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
				NULL, "WFQ weight mode is not supported\n");

		if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
					   RTE_TM_STATS_N_BYTES))
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
				NULL,
				"Requested port stats are not supported\n");

		return 0;
	}

	/* verify leaf (queue) node settings */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL, "Private shaper not supported on leaf\n");

	if (params->stats_mask & ~RTE_TM_STATS_N_PKTS)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			"Requested stats are not supported\n");

	/* check leaf node */
	if (level_id == 1) {
		if (params->leaf.cman != RTE_TM_CMAN_TAIL_DROP)
			return -rte_tm_error_set(error, ENODEV,
					RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
					NULL, "Only taildrop is supported\n");
	}

	return 0;
}
static int
dpaa2_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	       uint32_t parent_node_id, uint32_t priority, uint32_t weight,
	       uint32_t level_id, struct rte_tm_node_params *params,
	       struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_shaper_profile *profile = NULL;
	struct dpaa2_tm_node *node, *parent = NULL;
	int ret;

	if (0/* If device is started*/)
		return -rte_tm_error_set(error, EPERM,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, "Port is already started\n");

	ret = dpaa2_node_check_params(dev, node_id, priority, weight, level_id,
				      params, error);
	if (ret)
		return ret;

	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		profile = dpaa2_shaper_profile_from_id(priv,
						params->shaper_profile_id);
		if (!profile)
			return -rte_tm_error_set(error, ENODEV,
					RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					NULL, "Shaper id does not exist\n");
	}

	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		LIST_FOREACH(node, &priv->nodes, next) {
			if (node->type != 0 /*root node*/)
				continue;

			return -rte_tm_error_set(error, EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL, "Root node exists\n");
		}
	} else {
		parent = dpaa2_node_from_id(priv, parent_node_id);
		if (!parent)
			return -rte_tm_error_set(error, EINVAL,
					RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
					NULL, "Parent node id does not exist\n");
	}

	node = dpaa2_node_from_id(priv, node_id);
	if (node)
		return -rte_tm_error_set(error, EEXIST,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id already exists\n");

	node = rte_zmalloc_socket(NULL, sizeof(*node), 0, rte_socket_id());
	if (!node)
		return -rte_tm_error_set(error, ENOMEM,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	node->id = node_id;
	node->type = parent_node_id == RTE_TM_NODE_ID_NULL ? 0/*NODE_PORT*/ :
							     1/*NODE_QUEUE*/;

	if (parent) {
		node->parent = parent;
		parent->refcnt++;
	}

	if (profile) {
		node->profile = profile;
		profile->refcnt++;
	}

	node->weight = weight;
	node->priority = priority;
	node->stats_mask = params->stats_mask;

	LIST_INSERT_HEAD(&priv->nodes, node, next);

	return 0;
}
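
/*
 * Application-side sketch (illustrative only): building the two-level
 * hierarchy this driver accepts, one root (port) node plus one leaf per
 * TX queue. Leaf node ids must be the TX queue ids; the root id (100
 * here) is arbitrary but must be >= the number of TX queues so that
 * dpaa2_node_check_params() treats it as a non-leaf.
 *
 *	struct rte_tm_node_params np = {
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *	};
 *	struct rte_tm_error tm_err;
 *	uint32_t q;
 *
 *	rte_tm_node_add(port_id, 100, RTE_TM_NODE_ID_NULL,
 *			0, 1, 0, &np, &tm_err);
 *
 *	np.leaf.cman = RTE_TM_CMAN_TAIL_DROP;
 *	for (q = 0; q < nb_txq; q++)
 *		rte_tm_node_add(port_id, q, 100, 0, 1, 1, &np, &tm_err);
 */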
static int
dpaa2_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		  struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_node *node;

	if (0/* If device is started*/)
		return -rte_tm_error_set(error, EPERM,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, "Port is already started\n");

	node = dpaa2_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id does not exist\n");

	if (node->refcnt)
		return -rte_tm_error_set(error, EPERM,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id is used\n");

	if (node->parent)
		node->parent->refcnt--;

	if (node->profile)
		node->profile->refcnt--;

	LIST_REMOVE(node, next);
	rte_free(node);

	return 0;
}
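
/*
 * Commit walks the software node list and programs the hardware in two
 * steps: the root node's shaper profile (if any) is pushed through
 * dpni_set_tx_shaping(), and leaf nodes that share a priority are
 * collected into at most two WFQ groups (scheduler modes WEIGHTED_A and
 * WEIGHTED_B) before dpni_set_tx_priorities() applies the whole map.
 */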
static int
dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
		       struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_node *node, *temp_node;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
	int ret;
	int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
	struct dpni_tx_priorities_cfg prio_cfg;

	memset(&prio_cfg, 0, sizeof(prio_cfg));
	memset(conf, 0, sizeof(conf));

	LIST_FOREACH(node, &priv->nodes, next) {
		if (node->type == 0/*root node*/) {
			struct dpni_tx_shaping_cfg tx_cr_shaper, tx_er_shaper;

			if (!node->profile)
				continue;

			tx_cr_shaper.max_burst_size =
				node->profile->params.committed.size;
			tx_cr_shaper.rate_limit =
				node->profile->params.committed.rate /
				(1024 * 1024);
			tx_er_shaper.max_burst_size =
				node->profile->params.peak.size;
			tx_er_shaper.rate_limit =
				node->profile->params.peak.rate / (1024 * 1024);
			ret = dpni_set_tx_shaping(dpni, 0, priv->token,
						  &tx_cr_shaper,
						  &tx_er_shaper, 0);
			if (ret) {
				ret = -rte_tm_error_set(error, EINVAL,
					RTE_TM_ERROR_TYPE_SHAPER_PROFILE, NULL,
					"Error in setting Shaping\n");
				goto out;
			}
		} else { /* level 1, all leaf nodes */
			if (node->id >= dev->data->nb_tx_queues) {
				ret = -rte_tm_error_set(error, EINVAL,
					RTE_TM_ERROR_TYPE_NODE_ID, NULL,
					"Not enough txqs configured\n");
				goto out;
			}

			if (conf[node->id])
				continue;

			LIST_FOREACH(temp_node, &priv->nodes, next) {
				if (temp_node->id == node->id ||
				    temp_node->type == 0)
					continue;
				if (conf[temp_node->id])
					continue;
				if (node->priority == temp_node->priority) {
					if (wfq_grp == 0) {
						prio_cfg.tc_sched[temp_node->id].mode =
							DPNI_TX_SCHED_WEIGHTED_A;
						/* DPDK supports a lowest
						 * weight of 1, the DPAA2
						 * platform of 100.
						 */
						prio_cfg.tc_sched[temp_node->id].delta_bandwidth =
							temp_node->weight + 99;
					} else if (wfq_grp == 1) {
						prio_cfg.tc_sched[temp_node->id].mode =
							DPNI_TX_SCHED_WEIGHTED_B;
						prio_cfg.tc_sched[temp_node->id].delta_bandwidth =
							temp_node->weight + 99;
					} else {
						/* TODO: add one more check for
						 * the number of nodes in a group
						 */
						ret = -rte_tm_error_set(error, EINVAL,
							RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
							"Only 2 WFQ Groups are supported\n");
						goto out;
					}
					conf[temp_node->id] = 1;
					is_wfq_grp = 1;
				}
			}
			if (is_wfq_grp) {
				if (wfq_grp == 0) {
					prio_cfg.tc_sched[node->id].mode =
						DPNI_TX_SCHED_WEIGHTED_A;
					prio_cfg.tc_sched[node->id].delta_bandwidth =
						node->weight + 99;
					prio_cfg.prio_group_A = node->priority;
				} else if (wfq_grp == 1) {
					prio_cfg.tc_sched[node->id].mode =
						DPNI_TX_SCHED_WEIGHTED_B;
					prio_cfg.tc_sched[node->id].delta_bandwidth =
						node->weight + 99;
					prio_cfg.prio_group_B = node->priority;
				}
				wfq_grp++;
				is_wfq_grp = 0;
			}
			conf[node->id] = 1;
		}
	}
	if (wfq_grp)
		prio_cfg.separate_groups = 1;

	ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
	if (ret) {
		ret = -rte_tm_error_set(error, EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
					"Scheduling Failed\n");
		goto out;
	}

	return 0;

out:
	if (clear_on_fail) {
		dpaa2_tm_deinit(dev);
		dpaa2_tm_init(dev);
	}

	return ret;
}
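
/*
 * Application-side sketch (illustrative only): once profiles and nodes
 * are in place, a single commit programs the hardware; passing
 * clear_on_fail = 1 resets the software hierarchy if programming fails.
 *
 *	struct rte_tm_error tm_err;
 *
 *	if (rte_tm_hierarchy_commit(port_id, 1, &tm_err) != 0)
 *		printf("commit failed: %s\n", tm_err.message);
 */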
const struct rte_tm_ops dpaa2_tm_ops = {
	.node_type_get = dpaa2_node_type_get,
	.capabilities_get = dpaa2_capabilities_get,
	.level_capabilities_get = dpaa2_level_capabilities_get,
	.node_capabilities_get = dpaa2_node_capabilities_get,
	.shaper_profile_add = dpaa2_shaper_profile_add,
	.shaper_profile_delete = dpaa2_shaper_profile_delete,
	.node_add = dpaa2_node_add,
	.node_delete = dpaa2_node_delete,
	.hierarchy_commit = dpaa2_hierarchy_commit,
};
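
/*
 * This table is exposed to applications through the ethdev tm_ops_get
 * callback (presumably wired up in dpaa2_ethdev.c); the generic
 * rte_tm_* API then dispatches into the handlers above.
 */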