/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020-2021 NXP
 */

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_tm_driver.h>

#include "dpaa2_ethdev.h"
#include "dpaa2_pmd_logs.h"
#include <dpaa2_hw_dpio.h>

#define DPAA2_BURST_MAX			(64 * 1024)

#define DPAA2_SHAPER_MIN_RATE		0
#define DPAA2_SHAPER_MAX_RATE		107374182400ull
#define DPAA2_WEIGHT_MAX		24701
#define DPAA2_PKT_ADJUST_LEN_MIN	0
#define DPAA2_PKT_ADJUST_LEN_MAX	0x7ff

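/*
 * rte_tm (traffic management) support for the DPAA2 PMD.
 *
 * The scheduling hierarchy has three levels: one root (LNI/port) node,
 * one node per TX channel, and one leaf node per TX queue, where each
 * queue is mapped to a traffic class (TC).  dpaa2_tm_init() only sets up
 * the software lists; the hardware is programmed at hierarchy commit.
 */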
int
dpaa2_tm_init(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	LIST_INIT(&priv->shaper_profiles);
	LIST_INIT(&priv->nodes);

	return 0;
}

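/* Release every shaper profile and node still on the software lists. */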
void dpaa2_tm_deinit(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_shaper_profile *profile =
		LIST_FIRST(&priv->shaper_profiles);
	struct dpaa2_tm_node *node = LIST_FIRST(&priv->nodes);

	while (profile) {
		struct dpaa2_tm_shaper_profile *next = LIST_NEXT(profile, next);

		LIST_REMOVE(profile, next);
		rte_free(profile);
		profile = next;
	}

	while (node) {
		struct dpaa2_tm_node *next = LIST_NEXT(node, next);

		LIST_REMOVE(node, next);
		rte_free(node);
		node = next;
	}
}

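/* Look up a node by its rte_tm node id; returns NULL when not found. */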
static struct dpaa2_tm_node *
dpaa2_node_from_id(struct dpaa2_dev_priv *priv, uint32_t node_id)
{
	struct dpaa2_tm_node *node;

	LIST_FOREACH(node, &priv->nodes, next)
		if (node->id == node_id)
			return node;

	return NULL;
}

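/* Port-wide capabilities reported to the rte_tm API. */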
static int
dpaa2_capabilities_get(struct rte_eth_dev *dev,
		       struct rte_tm_capabilities *cap,
		       struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	if (!cap)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, "Capabilities are NULL\n");

	memset(cap, 0, sizeof(*cap));

	/* root node(port) + channels + txqs number, assuming each TX
	 * Queue is mapped to each TC
	 */
	cap->n_nodes_max = 1 + priv->num_channels + dev->data->nb_tx_queues;
	cap->n_levels_max = MAX_LEVEL;
	cap->non_leaf_nodes_identical = 1;
	cap->leaf_nodes_identical = 1;

	cap->shaper_n_max = 1 + priv->num_channels; /* LNI + channels */
	cap->shaper_private_n_max = 1 + priv->num_channels;
	cap->shaper_private_dual_rate_n_max = 1 + priv->num_channels;
	cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
	cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
	cap->shaper_pkt_length_adjust_min = DPAA2_PKT_ADJUST_LEN_MIN;
	cap->shaper_pkt_length_adjust_max = DPAA2_PKT_ADJUST_LEN_MAX;

	if (priv->num_channels > DPNI_MAX_TC)
		cap->sched_n_children_max = priv->num_channels;
	else
		cap->sched_n_children_max = DPNI_MAX_TC;

	cap->sched_sp_n_priorities_max = DPNI_MAX_TC;
	cap->sched_wfq_n_children_per_group_max = DPNI_MAX_TC;
	cap->sched_wfq_n_groups_max = 2;
	cap->sched_wfq_weight_max = DPAA2_WEIGHT_MAX / 100;
	cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;

	return 0;
}

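/* Per-level capabilities: LNI (root), channel, and queue (leaf) levels. */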
static int
dpaa2_level_capabilities_get(struct rte_eth_dev *dev,
			     uint32_t level_id,
			     struct rte_tm_level_capabilities *cap,
			     struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	if (!cap)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	memset(cap, 0, sizeof(*cap));

	if (level_id > QUEUE_LEVEL)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_LEVEL_ID,
					 NULL, "Wrong level id\n");

	if (level_id == LNI_LEVEL) { /* Root node (LNI) */
		cap->n_nodes_max = 1;
		cap->n_nodes_nonleaf_max = 1;
		cap->non_leaf_nodes_identical = 1;

		cap->nonleaf.shaper_private_supported = 1;
		cap->nonleaf.shaper_private_dual_rate_supported = 1;
		cap->nonleaf.shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
		cap->nonleaf.shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;

		cap->nonleaf.sched_n_children_max = priv->num_channels; /* no. of channels */
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 1;
		cap->nonleaf.sched_wfq_n_groups_max = 1;
		cap->nonleaf.sched_wfq_weight_max = 1;
		cap->nonleaf.stats_mask = RTE_TM_STATS_N_PKTS |
					  RTE_TM_STATS_N_BYTES;
	} else if (level_id == CHANNEL_LEVEL) { /* channels */
		cap->n_nodes_max = priv->num_channels;
		cap->n_nodes_nonleaf_max = priv->num_channels;
		cap->n_nodes_leaf_max = 0;
		cap->non_leaf_nodes_identical = 1;

		cap->nonleaf.shaper_private_supported = 1;
		cap->nonleaf.shaper_private_dual_rate_supported = 1;
		cap->nonleaf.shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
		cap->nonleaf.shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;

		/* no. of class queues per channel */
		cap->nonleaf.sched_n_children_max = priv->num_tx_tc;
		cap->nonleaf.sched_sp_n_priorities_max = priv->num_tx_tc;
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			priv->num_tx_tc;
		cap->nonleaf.sched_wfq_n_groups_max = 2;
		cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX / 100;
	} else { /* leaf nodes */
		/* queues per channel * number of channels */
		cap->n_nodes_max = priv->num_tx_tc * priv->num_channels;
		cap->n_nodes_leaf_max = priv->num_tx_tc * priv->num_channels;
		cap->leaf_nodes_identical = 1;

		cap->leaf.shaper_private_supported = 0;
		cap->leaf.stats_mask = RTE_TM_STATS_N_PKTS |
				       RTE_TM_STATS_N_BYTES;
	}

	return 0;
}

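/* Per-node capabilities, derived from the node's level in the hierarchy. */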
static int
dpaa2_node_capabilities_get(struct rte_eth_dev *dev, uint32_t node_id,
			    struct rte_tm_node_capabilities *cap,
			    struct rte_tm_error *error)
{
	struct dpaa2_tm_node *node;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	if (!cap)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	memset(cap, 0, sizeof(*cap));

	node = dpaa2_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id does not exist\n");

	if (node->level_id == LNI_LEVEL) {
		cap->shaper_private_supported = 1;
		cap->shaper_private_dual_rate_supported = 1;
		cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
		cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;

		cap->nonleaf.sched_n_children_max = priv->num_channels;
		cap->nonleaf.sched_sp_n_priorities_max = 1;
		cap->nonleaf.sched_wfq_n_children_per_group_max = 1;
		cap->nonleaf.sched_wfq_n_groups_max = 1;
		cap->nonleaf.sched_wfq_weight_max = 1;
		cap->stats_mask = RTE_TM_STATS_N_PKTS |
				  RTE_TM_STATS_N_BYTES;
	} else if (node->level_id == CHANNEL_LEVEL) {
		cap->shaper_private_supported = 1;
		cap->shaper_private_dual_rate_supported = 1;
		cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
		cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;

		cap->nonleaf.sched_n_children_max = priv->num_tx_tc;
		cap->nonleaf.sched_sp_n_priorities_max = priv->num_tx_tc;
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			priv->num_tx_tc;
		cap->nonleaf.sched_wfq_n_groups_max = 2;
		cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX / 100;

		cap->stats_mask = RTE_TM_STATS_N_PKTS |
				  RTE_TM_STATS_N_BYTES;
	}

	return 0;
}

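/* Report whether a node is a leaf (TX queue) or non-leaf (LNI/channel). */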
static int
dpaa2_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, int *is_leaf,
		    struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_node *node;

	if (!is_leaf)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	node = dpaa2_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id does not exist\n");

	*is_leaf = node->type == LEAF_NODE ? 1 : 0;

	return 0;
}

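/* Look up a shaper profile by id; returns NULL when not found. */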
static struct dpaa2_tm_shaper_profile *
dpaa2_shaper_profile_from_id(struct dpaa2_dev_priv *priv,
			     uint32_t shaper_profile_id)
{
	struct dpaa2_tm_shaper_profile *profile;

	LIST_FOREACH(profile, &priv->shaper_profiles, next)
		if (profile->id == shaper_profile_id)
			return profile;

	return NULL;
}

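/*
 * Validate and store a shaper profile.  Only the parameter ranges are
 * checked here; the profile is applied to hardware at hierarchy commit.
 */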
static int
dpaa2_shaper_profile_add(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
			 struct rte_tm_shaper_params *params,
			 struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_shaper_profile *profile;

	if (!params)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);
	if (params->committed.rate > DPAA2_SHAPER_MAX_RATE)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
			NULL, "Committed rate is out of range\n");

	if (params->committed.size > DPAA2_BURST_MAX)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE,
			NULL, "Committed size is out of range\n");

	if (params->peak.rate > DPAA2_SHAPER_MAX_RATE)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
			NULL, "Peak rate is out of range\n");

	if (params->peak.size > DPAA2_BURST_MAX)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
			NULL, "Peak size is out of range\n");

	if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					 NULL, "Wrong shaper profile id\n");

	if (params->pkt_length_adjust > DPAA2_PKT_ADJUST_LEN_MAX ||
	    params->pkt_length_adjust < DPAA2_PKT_ADJUST_LEN_MIN)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_CAPABILITIES,
					 NULL,
					 "Not supported pkt adjust length\n");

	profile = dpaa2_shaper_profile_from_id(priv, shaper_profile_id);
	if (profile)
		return -rte_tm_error_set(error, EEXIST,
					 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					 NULL, "Profile id already exists\n");

	profile = rte_zmalloc_socket(NULL, sizeof(*profile), 0,
				     rte_socket_id());
	if (!profile)
		return -rte_tm_error_set(error, ENOMEM,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	profile->id = shaper_profile_id;
	rte_memcpy(&profile->params, params, sizeof(profile->params));

	LIST_INSERT_HEAD(&priv->shaper_profiles, profile, next);

	return 0;
}

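/* A profile can only be deleted once no node references it. */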
static int
dpaa2_shaper_profile_delete(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
			    struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_shaper_profile *profile;

	profile = dpaa2_shaper_profile_from_id(priv, shaper_profile_id);
	if (!profile)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					 NULL, "Profile id does not exist\n");

	if (profile->refcnt)
		return -rte_tm_error_set(error, EPERM,
					 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					 NULL, "Profile is used\n");

	LIST_REMOVE(profile, next);
	rte_free(profile);

	return 0;
}

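/*
 * Common parameter checks for node add: ids, weight, level, and the
 * features this PMD does not support (shared shapers, WFQ weight mode,
 * private shapers on leaves, non-taildrop congestion management).
 */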
static int
dpaa2_node_check_params(struct rte_eth_dev *dev, uint32_t node_id,
			__rte_unused uint32_t priority, uint32_t weight,
			uint32_t level_id,
			struct rte_tm_node_params *params,
			struct rte_tm_error *error)
{
	if (node_id == RTE_TM_NODE_ID_NULL)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id is invalid\n");

	if (weight > DPAA2_WEIGHT_MAX)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
					 NULL, "Weight is out of range\n");

	if (level_id > QUEUE_LEVEL)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_LEVEL_ID,
					 NULL, "Wrong level id\n");

	if (!params)
		return -rte_tm_error_set(error, EINVAL,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	if (params->shared_shaper_id)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
			NULL, "Shared shaper is not supported\n");

	if (params->n_shared_shapers)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL, "Shared shaper is not supported\n");

	/* verify non leaf nodes settings */
	if (node_id >= dev->data->nb_tx_queues) {
		if (params->nonleaf.wfq_weight_mode)
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
				NULL, "WFQ weight mode is not supported\n");
	} else {
		if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
				NULL, "Private shaper not supported on leaf\n");
	}

	/* check leaf node */
	if (level_id == QUEUE_LEVEL) {
		if (params->leaf.cman != RTE_TM_CMAN_TAIL_DROP)
			return -rte_tm_error_set(error, ENODEV,
					RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
					NULL, "Only taildrop is supported\n");
		if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
					   RTE_TM_STATS_N_BYTES))
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
				NULL,
				"Requested port stats are not supported\n");
	} else if (level_id == LNI_LEVEL) {
		if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
					   RTE_TM_STATS_N_BYTES))
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
				NULL,
				"Requested port stats are not supported\n");
	}

	return 0;
}

static int
dpaa2_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	       uint32_t parent_node_id, uint32_t priority, uint32_t weight,
	       uint32_t level_id, struct rte_tm_node_params *params,
	       struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_shaper_profile *profile = NULL;
	struct dpaa2_tm_node *node, *parent = NULL;
	int ret;

	if (0/* If device is started*/)
		return -rte_tm_error_set(error, EPERM,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, "Port is already started\n");

	ret = dpaa2_node_check_params(dev, node_id, priority, weight, level_id,
				      params, error);
	if (ret)
		return ret;

	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		profile = dpaa2_shaper_profile_from_id(priv,
						params->shaper_profile_id);
		if (!profile)
			return -rte_tm_error_set(error, ENODEV,
					RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
					NULL, "Shaper id does not exist\n");
	}
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		LIST_FOREACH(node, &priv->nodes, next) {
			if (node->level_id != LNI_LEVEL)
				continue;

			return -rte_tm_error_set(error, EINVAL,
						 RTE_TM_ERROR_TYPE_UNSPECIFIED,
						 NULL, "Root node exists\n");
		}
	} else {
		parent = dpaa2_node_from_id(priv, parent_node_id);
		if (!parent)
			return -rte_tm_error_set(error, EINVAL,
					RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
					NULL, "Parent node id not exist\n");
	}

	node = dpaa2_node_from_id(priv, node_id);
	if (node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id already exists\n");

	node = rte_zmalloc_socket(NULL, sizeof(*node), 0, rte_socket_id());
	if (!node)
		return -rte_tm_error_set(error, ENOMEM,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, NULL);

	node->id = node_id;

	/* Leaf node ids map 1:1 to TX queue ids */
	if (node_id >= dev->data->nb_tx_queues)
		node->type = NON_LEAF_NODE;
	else
		node->type = LEAF_NODE;

	node->level_id = level_id;
	if (node->level_id == CHANNEL_LEVEL) {
		if (priv->channel_inuse < priv->num_channels) {
			node->channel_id = priv->channel_inuse;
			priv->channel_inuse++;
		} else {
			DPAA2_PMD_ERR("No free channel id available");
			rte_free(node);
			return -1;
		}
	}

	if (parent) {
		node->parent = parent;
		parent->refcnt++;
	}

	/* TODO: add check if refcnt is more than supported children */

	if (profile) {
		node->profile = profile;
		profile->refcnt++;
	}

	node->weight = weight;
	node->priority = priority;
	node->stats_mask = params->stats_mask;

	LIST_INSERT_HEAD(&priv->nodes, node, next);

	return 0;
}

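/* Remove a node once nothing references it any more. */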
static int
dpaa2_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
		  struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_node *node;

	if (0/* If device is started*/) {
		return -rte_tm_error_set(error, EPERM,
					 RTE_TM_ERROR_TYPE_UNSPECIFIED,
					 NULL, "Port is already started\n");
	}

	node = dpaa2_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id does not exist\n");
	if (node->refcnt)
		return -rte_tm_error_set(error, EPERM,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id is used\n");

	if (node->parent)
		node->parent->refcnt--;

	if (node->profile)
		node->profile->refcnt--;

	LIST_REMOVE(node, next);
	rte_free(node);

	return 0;
}

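/*
 * Map one leaf node onto a hardware TX queue: bind the queue to its
 * channel/TC pair and, unless disabled, arm congestion notification.
 */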
static int
dpaa2_tm_configure_queue(struct rte_eth_dev *dev, struct dpaa2_tm_node *node)
{
	int ret = 0;
	uint32_t tc_id;
	uint8_t flow_id, options = 0;
	struct dpni_queue tx_flow_cfg;
	struct dpni_queue_id qid;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;

	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
	dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[node->id];
	tc_id = node->parent->tc_id;
	node->parent->tc_id++;
	flow_id = 0;

	if (dpaa2_q == NULL) {
		DPAA2_PMD_ERR("Queue is not configured for node = %d",
			      node->id);
		return -1;
	}

	DPAA2_PMD_DEBUG("tc_id = %d, channel = %d\n\n", tc_id,
			node->parent->channel_id);
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     ((node->parent->channel_id << 8) | tc_id),
			     flow_id, options, &tx_flow_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the tx flow: "
			      "channel id = %d tc_id = %d, param = 0x%x "
			      "flow = %d err = %d", node->parent->channel_id,
			      tc_id, ((node->parent->channel_id << 8) | tc_id),
			      flow_id, ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;
	dpaa2_q->tc_index = tc_id;

	ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			((node->parent->channel_id << 8) | dpaa2_q->tc_index),
			dpaa2_q->flow_id, &tx_flow_cfg, &qid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting LFQID err = %d", ret);
		return -1;
	}
	dpaa2_q->fqid = qid.fqid;

	/* setting congestion notification */
	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg = {0};

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = dpaa2_q->nb_desc;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold (90% of the entry value).
		 */
		cong_notif_cfg.threshold_exit = (dpaa2_q->nb_desc * 9) / 10;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova =
			(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					DPNI_CONG_OPT_COHERENT_WRITE;
		cong_notif_cfg.cg_point = DPNI_CP_QUEUE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
				priv->token, DPNI_QUEUE_TX,
				((node->parent->channel_id << 8) | tc_id),
				&cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR("Error in setting tx congestion "
				      "notification: err = %d", ret);
			return -ret;
		}
	}

	return 0;
}

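/*
 * Recursively order the children of one channel by priority and configure
 * them: each pass bubbles the largest priority value to the end of the
 * array, recurses on the first n - 1 entries, then configures the last
 * entry, so queues are configured in ascending order of priority value.
 */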
static void
dpaa2_tm_sort_and_configure(struct rte_eth_dev *dev,
			    struct dpaa2_tm_node **nodes, int n)
{
	struct dpaa2_tm_node *temp_node;
	int i;

	if (n == 1) {
		DPAA2_PMD_DEBUG("node id = %d, priority = %d, index = %d\n",
				nodes[n - 1]->id, nodes[n - 1]->priority,
				n - 1);
		dpaa2_tm_configure_queue(dev, nodes[n - 1]);
		return;
	}

	for (i = 0; i < n - 1; i++) {
		if (nodes[i]->priority > nodes[i + 1]->priority) {
			temp_node = nodes[i];
			nodes[i] = nodes[i + 1];
			nodes[i + 1] = temp_node;
		}
	}
	dpaa2_tm_sort_and_configure(dev, nodes, n - 1);

	DPAA2_PMD_DEBUG("node id = %d, priority = %d, index = %d\n",
			nodes[n - 1]->id, nodes[n - 1]->priority, n - 1);
	dpaa2_tm_configure_queue(dev, nodes[n - 1]);
}

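/*
 * Apply the whole hierarchy to hardware in three passes:
 *   1. per channel, collect the leaf nodes and configure the TX queues;
 *   2. program the committed/excess rate shapers on the LNI and channels;
 *   3. program strict priorities and the (at most two) WFQ groups per
 *      channel via dpni_set_tx_priorities().
 * On failure the software state is reset when clear_on_fail is set.
 */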
static int
dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
		       struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_node *node;
	struct dpaa2_tm_node *leaf_node, *temp_leaf_node, *channel_node;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
	int ret, t;

	/* Process by level, and by channel (group) within a level */
	LIST_FOREACH(channel_node, &priv->nodes, next) {
		struct dpaa2_tm_node *nodes[DPNI_MAX_TC];
		int i = 0;

		if (channel_node->level_id != CHANNEL_LEVEL)
			continue;

		LIST_FOREACH(leaf_node, &priv->nodes, next) {
			if (leaf_node->level_id == LNI_LEVEL ||
			    leaf_node->level_id == CHANNEL_LEVEL)
				continue;

			if (leaf_node->parent == channel_node) {
				if (i >= DPNI_MAX_TC) {
					ret = -rte_tm_error_set(error, EINVAL,
						RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
						"More children than supported\n");
					goto out;
				}
				nodes[i++] = leaf_node;
			}
		}

		/* A channel with no leaf children has nothing to configure */
		if (i) {
			DPAA2_PMD_DEBUG("Configure queues\n");
			dpaa2_tm_sort_and_configure(dev, nodes, i);
		}
	}

	/* Shaping */
	LIST_FOREACH(node, &priv->nodes, next) {
		if (node->type == NON_LEAF_NODE) {
			struct dpni_tx_shaping_cfg tx_cr_shaper, tx_er_shaper;
			uint32_t param = 0;

			if (!node->profile)
				continue;

			tx_cr_shaper.max_burst_size =
				node->profile->params.committed.size;
			tx_cr_shaper.rate_limit =
				node->profile->params.committed.rate /
				(1024 * 1024);
			tx_er_shaper.max_burst_size =
				node->profile->params.peak.size;
			tx_er_shaper.rate_limit =
				node->profile->params.peak.rate / (1024 * 1024);

			/* Root node carries the LNI shaper, others the
			 * per-channel shaper.
			 */
			if (node->parent == NULL) {
				DPAA2_PMD_DEBUG("LNI S.rate = %u, burst =%u\n",
						tx_cr_shaper.rate_limit,
						tx_cr_shaper.max_burst_size);
				param = 0x2;
				param |= node->profile->params.pkt_length_adjust << 16;
			} else {
				DPAA2_PMD_DEBUG("Channel = %d S.rate = %u\n",
						node->channel_id,
						tx_cr_shaper.rate_limit);
				param = (node->channel_id << 8);
			}
			ret = dpni_set_tx_shaping(dpni, 0, priv->token,
					&tx_cr_shaper, &tx_er_shaper, param);
			if (ret) {
				ret = -rte_tm_error_set(error, EINVAL,
					RTE_TM_ERROR_TYPE_SHAPER_PROFILE, NULL,
					"Error in setting Shaping\n");
				goto out;
			}
		}
	}

	/* Strict priority and WFQ grouping, per channel */
	LIST_FOREACH(channel_node, &priv->nodes, next) {
		int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
		struct dpni_tx_priorities_cfg prio_cfg;

		memset(&prio_cfg, 0, sizeof(prio_cfg));
		memset(conf, 0, sizeof(conf));

		/* Process for each channel */
		if (channel_node->level_id != CHANNEL_LEVEL)
			continue;

		LIST_FOREACH(leaf_node, &priv->nodes, next) {
			struct dpaa2_queue *leaf_dpaa2_q;
			uint8_t leaf_tc_id;

			if (leaf_node->level_id == LNI_LEVEL ||
			    leaf_node->level_id == CHANNEL_LEVEL)
				continue;

			/* level 2, all leaf nodes */
			if (leaf_node->id >= dev->data->nb_tx_queues) {
				ret = -rte_tm_error_set(error, EINVAL,
						RTE_TM_ERROR_TYPE_NODE_ID, NULL,
						"Not enough txqs configured\n");
				goto out;
			}

			if (conf[leaf_node->id])
				continue;

			if (leaf_node->parent != channel_node)
				continue;

			leaf_dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[leaf_node->id];
			leaf_tc_id = leaf_dpaa2_q->tc_index;
			/* Process sibling leaf nodes */
			LIST_FOREACH(temp_leaf_node, &priv->nodes, next) {
				if (temp_leaf_node->id == leaf_node->id ||
				    temp_leaf_node->level_id == LNI_LEVEL ||
				    temp_leaf_node->level_id == CHANNEL_LEVEL)
					continue;

				if (temp_leaf_node->parent != channel_node)
					continue;

				if (conf[temp_leaf_node->id])
					continue;

				/* Same priority means same WFQ group */
				if (leaf_node->priority == temp_leaf_node->priority) {
					struct dpaa2_queue *temp_leaf_dpaa2_q;
					uint8_t temp_leaf_tc_id;

					temp_leaf_dpaa2_q = (struct dpaa2_queue *)
						dev->data->tx_queues[temp_leaf_node->id];
					temp_leaf_tc_id = temp_leaf_dpaa2_q->tc_index;
					if (wfq_grp == 0) {
						prio_cfg.tc_sched[temp_leaf_tc_id].mode =
							DPNI_TX_SCHED_WEIGHTED_A;
						/* DPAA2 supports weight in multiples of 100 */
						prio_cfg.tc_sched[temp_leaf_tc_id].delta_bandwidth =
							temp_leaf_node->weight * 100;
					} else if (wfq_grp == 1) {
						prio_cfg.tc_sched[temp_leaf_tc_id].mode =
							DPNI_TX_SCHED_WEIGHTED_B;
						prio_cfg.tc_sched[temp_leaf_tc_id].delta_bandwidth =
							temp_leaf_node->weight * 100;
					} else {
						ret = -rte_tm_error_set(error, EINVAL,
							RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
							"Only 2 WFQ Groups are supported\n");
						goto out;
					}
					conf[temp_leaf_node->id] = 1;
					is_wfq_grp = 1;
				}
			}
			if (is_wfq_grp) {
				if (wfq_grp == 0) {
					prio_cfg.tc_sched[leaf_tc_id].mode =
						DPNI_TX_SCHED_WEIGHTED_A;
					prio_cfg.tc_sched[leaf_tc_id].delta_bandwidth =
						leaf_node->weight * 100;
					prio_cfg.prio_group_A = leaf_node->priority;
				} else if (wfq_grp == 1) {
					prio_cfg.tc_sched[leaf_tc_id].mode =
						DPNI_TX_SCHED_WEIGHTED_B;
					prio_cfg.tc_sched[leaf_tc_id].delta_bandwidth =
						leaf_node->weight * 100;
					prio_cfg.prio_group_B = leaf_node->priority;
				}
				wfq_grp++;
				is_wfq_grp = 0;
			}
			conf[leaf_node->id] = 1;
		}
		if (wfq_grp > 1) {
			prio_cfg.separate_groups = 1;
			if (prio_cfg.prio_group_B < prio_cfg.prio_group_A) {
				prio_cfg.prio_group_A = 0;
				prio_cfg.prio_group_B = 1;
			} else {
				prio_cfg.prio_group_A = 1;
				prio_cfg.prio_group_B = 0;
			}
		}

		prio_cfg.prio_group_A = 1;
		prio_cfg.channel_idx = channel_node->channel_id;
		ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
		if (ret) {
			ret = -rte_tm_error_set(error, EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
					"Scheduling Failed\n");
			goto out;
		}
		DPAA2_PMD_DEBUG("########################################\n");
		DPAA2_PMD_DEBUG("Channel idx = %d\n", prio_cfg.channel_idx);
		for (t = 0; t < DPNI_MAX_TC; t++) {
			DPAA2_PMD_DEBUG("tc = %d mode = %d ", t,
					prio_cfg.tc_sched[t].mode);
			DPAA2_PMD_DEBUG("delta = %d\n",
					prio_cfg.tc_sched[t].delta_bandwidth);
		}
		DPAA2_PMD_DEBUG("prioritya = %d\n", prio_cfg.prio_group_A);
		DPAA2_PMD_DEBUG("priorityb = %d\n", prio_cfg.prio_group_B);
		DPAA2_PMD_DEBUG("separate grps = %d\n\n",
				prio_cfg.separate_groups);
	}

	return 0;

out:
	if (clear_on_fail) {
		dpaa2_tm_deinit(dev);
		dpaa2_tm_init(dev);
	}

	return ret;
}

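/*
 * Read per-node statistics: egress frame/byte counters for the LNI from
 * statistics page 1, CEETM dequeue counters for a queue from page 3.
 * Channel-level statistics are not supported.
 */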
static int
dpaa2_node_stats_read(struct rte_eth_dev *dev, uint32_t node_id,
		      struct rte_tm_node_stats *stats, uint64_t *stats_mask,
		      int clear, struct rte_tm_error *error)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_tm_node *node;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
	union dpni_statistics value;
	int ret = 0;

	node = dpaa2_node_from_id(priv, node_id);
	if (!node)
		return -rte_tm_error_set(error, ENODEV,
					 RTE_TM_ERROR_TYPE_NODE_ID,
					 NULL, "Node id does not exist\n");

	if (stats_mask)
		*stats_mask = node->stats_mask;

	if (!stats)
		return 0;

	memset(stats, 0, sizeof(*stats));
	memset(&value, 0, sizeof(union dpni_statistics));

	if (node->level_id == LNI_LEVEL) {
		ret = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					  1, 0, &value); /* page_1 */
		if (ret)
			return -rte_tm_error_set(error, -ret,
					RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
					"Failed to read port statistics\n");

		if (node->stats_mask & RTE_TM_STATS_N_PKTS)
			stats->n_pkts = value.page_1.egress_all_frames;

		if (node->stats_mask & RTE_TM_STATS_N_BYTES)
			stats->n_bytes = value.page_1.egress_all_bytes;

		if (clear) {
			ret = dpni_reset_statistics(dpni, CMD_PRI_LOW,
						    priv->token);
			if (ret)
				return -rte_tm_error_set(error, -ret,
					RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
					"Failed to reset port statistics\n");
		}
	} else if (node->level_id == QUEUE_LEVEL) {
		struct dpaa2_queue *dpaa2_q;

		dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[node->id];

		ret = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					  3, /* page_3 */
					  (node->parent->channel_id << 8 |
					   dpaa2_q->tc_index), &value);
		if (ret)
			return -rte_tm_error_set(error, -ret,
					RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
					"Failed to read queue statistics\n");

		if (node->stats_mask & RTE_TM_STATS_N_PKTS)
			stats->n_pkts = value.page_3.ceetm_dequeue_frames;
		if (node->stats_mask & RTE_TM_STATS_N_BYTES)
			stats->n_bytes = value.page_3.ceetm_dequeue_bytes;
	} else {
		return -rte_tm_error_set(error, -1,
				RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to read channel statistics\n");
	}

	return 0;
}

const struct rte_tm_ops dpaa2_tm_ops = {
	.node_type_get = dpaa2_node_type_get,
	.capabilities_get = dpaa2_capabilities_get,
	.level_capabilities_get = dpaa2_level_capabilities_get,
	.node_capabilities_get = dpaa2_node_capabilities_get,
	.shaper_profile_add = dpaa2_shaper_profile_add,
	.shaper_profile_delete = dpaa2_shaper_profile_delete,
	.node_add = dpaa2_node_add,
	.node_delete = dpaa2_node_delete,
	.hierarchy_commit = dpaa2_hierarchy_commit,
	.node_stats_read = dpaa2_node_stats_read,
};
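
/*
 * Usage sketch (hypothetical application code, not part of this driver):
 * build the hierarchy root-first through the generic rte_tm API, then
 * commit it.  Node ids at or above nb_tx_queues are non-leaf nodes.
 *
 *	struct rte_tm_error err;
 *	struct rte_tm_node_params np = {
 *		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
 *	};
 *
 *	// root (LNI) node
 *	rte_tm_node_add(port_id, root_id, RTE_TM_NODE_ID_NULL,
 *			0, 1, LNI_LEVEL, &np, &err);
 *	// one channel under the root
 *	rte_tm_node_add(port_id, ch_id, root_id, 0, 1, CHANNEL_LEVEL,
 *			&np, &err);
 *	// one leaf per TX queue (leaf node id == queue id)
 *	np.leaf.cman = RTE_TM_CMAN_TAIL_DROP;
 *	rte_tm_node_add(port_id, 0, ch_id, 0, 1, QUEUE_LEVEL, &np, &err);
 *	rte_tm_hierarchy_commit(port_id, 1, &err);
 */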