/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_malloc.h>
#include <rte_string_fns.h>

#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"

#define SUBPORT_TC_PERIOD	10
#define PIPE_TC_PERIOD		40
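/* SUBPORT_TC_PERIOD and PIPE_TC_PERIOD are the traffic-class rate
 * enforcement periods handed to rte_sched (the tc_period fields, in
 * milliseconds) for the subport and pipe levels respectively; a shorter
 * period gives finer-grained rate enforcement at the cost of more
 * frequent credit updates.
 */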
int
softnic_tmgr_init(struct pmd_internals *p)
{
	TAILQ_INIT(&p->tmgr_port_list);

	return 0;
}

void
softnic_tmgr_free(struct pmd_internals *p)
{
	for ( ; ; ) {
		struct softnic_tmgr_port *tmgr_port;

		tmgr_port = TAILQ_FIRST(&p->tmgr_port_list);
		if (tmgr_port == NULL)
			break;

		TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
		free(tmgr_port);
	}
}
struct softnic_tmgr_port *
softnic_tmgr_port_find(struct pmd_internals *p,
	const char *name)
{
	struct softnic_tmgr_port *tmgr_port;

	if (name == NULL)
		return NULL;

	TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node)
		if (strcmp(tmgr_port->name, name) == 0)
			return tmgr_port;

	return NULL;
}
struct softnic_tmgr_port *
softnic_tmgr_port_create(struct pmd_internals *p,
	const char *name,
	struct rte_sched_port *sched)
{
	struct softnic_tmgr_port *tmgr_port;

	/* Check input params */
	if (name == NULL ||
		softnic_tmgr_port_find(p, name) ||
		sched == NULL)
		return NULL;

	/* Node allocation */
	tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port));
	if (tmgr_port == NULL)
		return NULL;

	/* Node fill in */
	strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
	tmgr_port->s = sched;

	/* Node add to list */
	TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node);

	return tmgr_port;
}
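/* Illustrative usage (variable names hypothetical), sketching how a
 * scheduler created elsewhere gets wrapped and registered — tm_start()
 * below does exactly this with the name "TMGR":
 *
 *	struct rte_sched_port *sched = rte_sched_port_config(&port_params);
 *	struct softnic_tmgr_port *tmgr;
 *
 *	tmgr = softnic_tmgr_port_create(p, "TMGR", sched);
 *	if (tmgr == NULL)
 *		rte_sched_port_free(sched);
 */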
static void
tm_hierarchy_init(struct pmd_internals *p)
{
	memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));

	/* Initialize shaper profile list */
	TAILQ_INIT(&p->soft.tm.h.shaper_profiles);

	/* Initialize shared shaper list */
	TAILQ_INIT(&p->soft.tm.h.shared_shapers);

	/* Initialize WRED profile list */
	TAILQ_INIT(&p->soft.tm.h.wred_profiles);

	/* Initialize TM node list */
	TAILQ_INIT(&p->soft.tm.h.nodes);
}
static void
tm_hierarchy_uninit(struct pmd_internals *p)
{
	/* Remove all nodes */
	for ( ; ; ) {
		struct tm_node *tm_node;

		tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
		if (tm_node == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
		free(tm_node);
	}

	/* Remove all WRED profiles */
	for ( ; ; ) {
		struct tm_wred_profile *wred_profile;

		wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
		if (wred_profile == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
		free(wred_profile);
	}

	/* Remove all shared shapers */
	for ( ; ; ) {
		struct tm_shared_shaper *shared_shaper;

		shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
		if (shared_shaper == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
		free(shared_shaper);
	}

	/* Remove all shaper profiles */
	for ( ; ; ) {
		struct tm_shaper_profile *shaper_profile;

		shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
		if (shaper_profile == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
			shaper_profile, node);
		free(shaper_profile);
	}

	memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
}
int
tm_init(struct pmd_internals *p)
{
	tm_hierarchy_init(p);

	return 0;
}

void
tm_free(struct pmd_internals *p)
{
	tm_hierarchy_uninit(p);
}

int
tm_start(struct pmd_internals *p)
{
	struct softnic_tmgr_port *tmgr_port;
	struct tm_params *t = &p->soft.tm.params;
	struct rte_sched_port *sched;
	uint32_t n_subports, subport_id;
	int status;

	/* Is hierarchy frozen? */
	if (p->soft.tm.hierarchy_frozen == 0)
		return -1;

	/* Port */
	sched = rte_sched_port_config(&t->port_params);
	if (sched == NULL)
		return -1;

	/* Subport */
	n_subports = t->port_params.n_subports_per_port;
	for (subport_id = 0; subport_id < n_subports; subport_id++) {
		uint32_t n_pipes_per_subport =
			t->port_params.n_pipes_per_subport;
		uint32_t pipe_id;

		status = rte_sched_subport_config(sched,
			subport_id,
			&t->subport_params[subport_id]);
		if (status) {
			rte_sched_port_free(sched);
			return -1;
		}

		/* Pipe */
		n_pipes_per_subport = t->port_params.n_pipes_per_subport;
		for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
			int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
				pipe_id;
			int profile_id = t->pipe_to_profile[pos];

			if (profile_id < 0)
				continue;

			status = rte_sched_pipe_config(sched,
				subport_id,
				pipe_id,
				profile_id);
			if (status) {
				rte_sched_port_free(sched);
				return -1;
			}
		}
	}

	tmgr_port = softnic_tmgr_port_create(p, "TMGR", sched);
	if (tmgr_port == NULL) {
		rte_sched_port_free(sched);
		return -1;
	}

	p->soft.tm.sched = sched;

	return 0;
}
void
tm_stop(struct pmd_internals *p)
{
	if (p->soft.tm.sched) {
		rte_sched_port_free(p->soft.tm.sched);
		p->soft.tm.sched = NULL;
	}

	/* Unfreeze hierarchy */
	p->soft.tm.hierarchy_frozen = 0;
}
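/* Typical lifecycle, sketched under the assumption that the standard
 * rte_tm API is driving this PMD:
 *
 *	rte_tm_hierarchy_commit()  -> freezes the hierarchy (blueprints)
 *	tm_start()                 -> instantiates the rte_sched port
 *	tm_stop()                  -> frees the port, unfreezes hierarchy
 *
 * After tm_stop(), nodes and profiles may be modified and the hierarchy
 * committed again.
 */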
static struct tm_shaper_profile *
tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
	struct tm_shaper_profile *sp;

	TAILQ_FOREACH(sp, spl, node)
		if (shaper_profile_id == sp->shaper_profile_id)
			return sp;

	return NULL;
}

static struct tm_shared_shaper *
tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
	struct tm_shared_shaper *ss;

	TAILQ_FOREACH(ss, ssl, node)
		if (shared_shaper_id == ss->shared_shaper_id)
			return ss;

	return NULL;
}

static struct tm_wred_profile *
tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
	struct tm_wred_profile *wp;

	TAILQ_FOREACH(wp, wpl, node)
		if (wred_profile_id == wp->wred_profile_id)
			return wp;

	return NULL;
}

static struct tm_node *
tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node)
		if (n->node_id == node_id)
			return n;

	return NULL;
}
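/* The four search helpers above are linear scans of tail queues; lookup
 * cost grows with the number of objects, which is fine for a control
 * path that runs at configuration time, never per packet.
 */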
static struct tm_node *
tm_root_node_present(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node)
		if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
			return n;

	return NULL;
}
static uint32_t
tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *ns;
	uint32_t subport_id;

	subport_id = 0;
	TAILQ_FOREACH(ns, nl, node) {
		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		if (ns->node_id == subport_node->node_id)
			return subport_id;

		subport_id++;
	}

	return UINT32_MAX;
}
static uint32_t
tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *np;
	uint32_t pipe_id;

	pipe_id = 0;
	TAILQ_FOREACH(np, nl, node) {
		if (np->level != TM_NODE_LEVEL_PIPE ||
			np->parent_node_id != pipe_node->parent_node_id)
			continue;

		if (np->node_id == pipe_node->node_id)
			return pipe_id;

		pipe_id++;
	}

	return UINT32_MAX;
}
static uint32_t
tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
{
	return tc_node->priority;
}
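/* A TC's index within its pipe is, by construction, its SP priority:
 * node_add_check() caps priority below RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE
 * and hierarchy_commit_check() verifies one TC per priority per pipe.
 */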
static uint32_t
tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *nq;
	uint32_t queue_id;

	queue_id = 0;
	TAILQ_FOREACH(nq, nl, node) {
		if (nq->level != TM_NODE_LEVEL_QUEUE ||
			nq->parent_node_id != queue_node->parent_node_id)
			continue;

		if (nq->node_id == queue_node->node_id)
			return queue_id;

		queue_id++;
	}

	return UINT32_MAX;
}
static uint32_t
tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t n_queues_max = p->params.tm.n_queues;
	uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
	uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
	uint32_t n_subports_max = n_pipes_max;
	uint32_t n_root_max = 1;

	switch (level) {
	case TM_NODE_LEVEL_PORT:
		return n_root_max;
	case TM_NODE_LEVEL_SUBPORT:
		return n_subports_max;
	case TM_NODE_LEVEL_PIPE:
		return n_pipes_max;
	case TM_NODE_LEVEL_TC:
		return n_tc_max;
	case TM_NODE_LEVEL_QUEUE:
	default:
		return n_queues_max;
	}
}
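/* Worked example, assuming the usual constants of 4 queues per TC and
 * 4 TCs per pipe: with n_queues = 65536, the level caps derived above
 * are 16384 TCs, 4096 pipes, 4096 subports and 1 root node.
 */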
/* Traffic manager node type get */
static int
pmd_tm_node_type_get(struct rte_eth_dev *dev,
	uint32_t node_id,
	int *is_leaf,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	if (is_leaf == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	if (node_id == RTE_TM_NODE_ID_NULL ||
		(tm_node_search(dev, node_id) == NULL))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	*is_leaf = node_id < p->params.tm.n_queues;

	return 0;
}
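/* Leaf/non-leaf split: node IDs 0 .. p->params.tm.n_queues - 1 are
 * reserved for leaf (queue) nodes, and every non-leaf node must use an
 * ID at or above n_queues; the node_add_check_*() helpers below enforce
 * this convention.
 */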
#ifdef RTE_SCHED_RED
#define WRED_SUPPORTED						1
#else
#define WRED_SUPPORTED						0
#endif

#define STATS_MASK_DEFAULT					\
	(RTE_TM_STATS_N_PKTS |					\
	RTE_TM_STATS_N_BYTES |					\
	RTE_TM_STATS_N_PKTS_GREEN_DROPPED |			\
	RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

#define STATS_MASK_QUEUE					\
	(STATS_MASK_DEFAULT |					\
	RTE_TM_STATS_N_PKTS_QUEUED)
static const struct rte_tm_capabilities tm_cap = {
	.n_nodes_max = UINT32_MAX,
	.n_levels_max = TM_NODE_LEVEL_MAX,

	.non_leaf_nodes_identical = 0,
	.leaf_nodes_identical = 1,

	.shaper_n_max = UINT32_MAX,
	.shaper_private_n_max = UINT32_MAX,
	.shaper_private_dual_rate_n_max = 0,
	.shaper_private_rate_min = 1,
	.shaper_private_rate_max = UINT32_MAX,

	.shaper_shared_n_max = UINT32_MAX,
	.shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
	.shaper_shared_n_shapers_per_node_max = 1,
	.shaper_shared_dual_rate_n_max = 0,
	.shaper_shared_rate_min = 1,
	.shaper_shared_rate_max = UINT32_MAX,

	.shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
	.shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,

	.sched_n_children_max = UINT32_MAX,
	.sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
	.sched_wfq_n_children_per_group_max = UINT32_MAX,
	.sched_wfq_n_groups_max = 1,
	.sched_wfq_weight_max = UINT32_MAX,

	.cman_wred_packet_mode_supported = WRED_SUPPORTED,
	.cman_wred_byte_mode_supported = 0,
	.cman_head_drop_supported = 0,
	.cman_wred_context_n_max = 0,
	.cman_wred_context_private_n_max = 0,
	.cman_wred_context_shared_n_max = 0,
	.cman_wred_context_shared_n_nodes_per_context_max = 0,
	.cman_wred_context_shared_n_contexts_per_node_max = 0,

	.mark_vlan_dei_supported = {0, 0, 0},
	.mark_ip_ecn_tcp_supported = {0, 0, 0},
	.mark_ip_ecn_sctp_supported = {0, 0, 0},
	.mark_ip_dscp_supported = {0, 0, 0},

	.dynamic_update_mask = 0,

	.stats_mask = STATS_MASK_QUEUE,
};
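/* Rate and size limits are capped at UINT32_MAX because the underlying
 * rte_sched port takes 32-bit token bucket parameters; see the
 * "non-zero, 32-bit" checks in shaper_profile_check() below.
 */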
/* Traffic manager capabilities get */
static int
pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused,
	struct rte_tm_capabilities *cap,
	struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_cap, sizeof(*cap));

	cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

	cap->shaper_private_n_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);

	cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);

	cap->shaper_n_max = cap->shaper_private_n_max +
		cap->shaper_shared_n_max;

	cap->shaper_shared_n_nodes_per_shaper_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);

	cap->sched_n_children_max = RTE_MAX(
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
		(uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);

	cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;

	cap->cman_wred_context_private_n_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

	cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
		cap->cman_wred_context_shared_n_max;

	return 0;
}
static const struct rte_tm_level_capabilities tm_level_cap[] = {
	[TM_NODE_LEVEL_PORT] = {
		.n_nodes_max = 1,
		.n_nodes_nonleaf_max = 1,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 0,

			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = 1,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_SUBPORT] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 0,

			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
#ifdef RTE_SCHED_SUBPORT_TC_OV
			.sched_wfq_weight_max = UINT32_MAX,
#else
			.sched_wfq_weight_max = 1,
#endif

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_PIPE] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 0,

			.sched_n_children_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_sp_n_priorities_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_wfq_n_children_per_group_max = 1,
			.sched_wfq_n_groups_max = 0,
			.sched_wfq_weight_max = 1,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_TC] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 1,

			.sched_n_children_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_QUEUE] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = 0,
		.n_nodes_leaf_max = UINT32_MAX,
		.non_leaf_nodes_identical = 0,
		.leaf_nodes_identical = 1,

		{.leaf = {
			.shaper_private_supported = 0,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 0,
			.shaper_private_rate_max = 0,
			.shaper_shared_n_max = 0,

			.cman_head_drop_supported = 0,
			.cman_wred_packet_mode_supported = WRED_SUPPORTED,
			.cman_wred_byte_mode_supported = 0,
			.cman_wred_context_private_supported = WRED_SUPPORTED,
			.cman_wred_context_shared_n_max = 0,

			.stats_mask = STATS_MASK_QUEUE,
		} },
	},
};
/* Traffic manager level capabilities get */
static int
pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
	uint32_t level_id,
	struct rte_tm_level_capabilities *cap,
	struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	if (level_id >= TM_NODE_LEVEL_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));

	switch (level_id) {
	case TM_NODE_LEVEL_PORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_SUBPORT);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_SUBPORT);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_PIPE);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_PIPE:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_PIPE);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		break;

	case TM_NODE_LEVEL_TC:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_TC);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		break;

	case TM_NODE_LEVEL_QUEUE:
	default:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_QUEUE);
		cap->n_nodes_leaf_max = cap->n_nodes_max;
		break;
	}

	return 0;
}
static const struct rte_tm_node_capabilities tm_node_cap[] = {
	[TM_NODE_LEVEL_PORT] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 0,

		{.nonleaf = {
			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = 1,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_SUBPORT] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 0,

		{.nonleaf = {
			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_PIPE] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 0,

		{.nonleaf = {
			.sched_n_children_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_sp_n_priorities_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_wfq_n_children_per_group_max = 1,
			.sched_wfq_n_groups_max = 0,
			.sched_wfq_weight_max = 1,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_TC] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 1,

		{.nonleaf = {
			.sched_n_children_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_QUEUE] = {
		.shaper_private_supported = 0,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 0,
		.shaper_private_rate_max = 0,
		.shaper_shared_n_max = 0,

		{.leaf = {
			.cman_head_drop_supported = 0,
			.cman_wred_packet_mode_supported = WRED_SUPPORTED,
			.cman_wred_byte_mode_supported = 0,
			.cman_wred_context_private_supported = WRED_SUPPORTED,
			.cman_wred_context_shared_n_max = 0,
		} },

		.stats_mask = STATS_MASK_QUEUE,
	},
};
/* Traffic manager node capabilities get */
static int
pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
	uint32_t node_id,
	struct rte_tm_node_capabilities *cap,
	struct rte_tm_error *error)
{
	struct tm_node *tm_node;

	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	tm_node = tm_node_search(dev, node_id);
	if (tm_node == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));

	switch (tm_node->level) {
	case TM_NODE_LEVEL_PORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_SUBPORT);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_PIPE);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_PIPE:
	case TM_NODE_LEVEL_TC:
	case TM_NODE_LEVEL_QUEUE:
	default:
		break;
	}

	return 0;
}
static int
shaper_profile_check(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct tm_shaper_profile *sp;

	/* Shaper profile ID must not be NONE. */
	if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper profile must not exist. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak rate: non-zero, 32-bit */
	if (profile->peak.rate == 0 ||
		profile->peak.rate >= UINT32_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak size: non-zero, 32-bit */
	if (profile->peak.size == 0 ||
		profile->peak.size >= UINT32_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
			NULL,
			rte_strerror(EINVAL));

	/* Dual-rate profiles are not supported. */
	if (profile->committed.rate != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Packet length adjust: 24 bytes */
	if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
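/* Example of a profile that passes the checks above (values are
 * illustrative):
 *
 *	struct rte_tm_shaper_params sp = {
 *		.committed = {.rate = 0, .size = 0},
 *		.peak = {.rate = 1250000000, .size = 1000000},
 *		.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
 *	};
 *
 * i.e. single rate (10 Gbps expressed in bytes/sec), a non-zero bucket
 * size, and the mandatory 24-byte framing overhead adjustment.
 */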
/* Traffic manager shaper profile add */
static int
pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
	struct tm_shaper_profile *sp;
	int status;

	/* Check input params */
	status = shaper_profile_check(dev, shaper_profile_id, profile, error);
	if (status)
		return status;

	/* Memory allocation */
	sp = calloc(1, sizeof(struct tm_shaper_profile));
	if (sp == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	sp->shaper_profile_id = shaper_profile_id;
	memcpy(&sp->params, profile, sizeof(sp->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(spl, sp, node);
	p->soft.tm.h.n_shaper_profiles++;

	return 0;
}
/* Traffic manager shaper profile delete */
static int
pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile *sp;

	/* Check existing */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (sp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
	p->soft.tm.h.n_shaper_profiles--;
	free(sp);

	return 0;
}
static struct tm_node *
tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
	struct tm_shared_shaper *ss)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	/* Subport: each TC uses shared shaper */
	TAILQ_FOREACH(n, nl, node) {
		if (n->level != TM_NODE_LEVEL_TC ||
			n->params.n_shared_shapers == 0 ||
			n->params.shared_shaper_id[0] != ss->shared_shaper_id)
			continue;

		return n;
	}

	return NULL;
}
static int
update_subport_tc_rate(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct tm_shared_shaper *ss,
	struct tm_shaper_profile *sp_new)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_subport_params subport_params;

	struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
		ss->shaper_profile_id);

	/* Derive new subport configuration. */
	memcpy(&subport_params,
		&p->soft.tm.params.subport_params[subport_id],
		sizeof(subport_params));
	subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;

	/* Update the subport configuration. */
	if (rte_sched_subport_config(p->soft.tm.sched,
		subport_id, &subport_params))
		return -1;

	/* Commit changes. */
	sp_old->n_users--;

	ss->shaper_profile_id = sp_new->shaper_profile_id;
	sp_new->n_users++;

	memcpy(&p->soft.tm.params.subport_params[subport_id],
		&subport_params,
		sizeof(subport_params));

	return 0;
}
/* Traffic manager shared shaper add/update */
static int
pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
	uint32_t shared_shaper_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper *ss;
	struct tm_shaper_profile *sp;
	struct tm_node *nt;

	/* Shaper profile must be valid. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/**
	 * Add new shared shaper
	 */
	ss = tm_shared_shaper_search(dev, shared_shaper_id);
	if (ss == NULL) {
		struct tm_shared_shaper_list *ssl =
			&p->soft.tm.h.shared_shapers;

		/* Hierarchy must not be frozen */
		if (p->soft.tm.hierarchy_frozen)
			return -rte_tm_error_set(error,
				EBUSY,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EBUSY));

		/* Memory allocation */
		ss = calloc(1, sizeof(struct tm_shared_shaper));
		if (ss == NULL)
			return -rte_tm_error_set(error,
				ENOMEM,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(ENOMEM));

		/* Fill in */
		ss->shared_shaper_id = shared_shaper_id;
		ss->shaper_profile_id = shaper_profile_id;

		/* Add to list */
		TAILQ_INSERT_TAIL(ssl, ss, node);
		p->soft.tm.h.n_shared_shapers++;

		return 0;
	}

	/**
	 * Update existing shared shaper
	 */
	/* Hierarchy must be frozen (run-time update) */
	if (p->soft.tm.hierarchy_frozen == 0)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Propagate change. */
	nt = tm_shared_shaper_get_tc(dev, ss);
	if (update_subport_tc_rate(dev, nt, ss, sp))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
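/* Sketch of a runtime rate change via the public API (IDs hypothetical,
 * signatures per rte_tm.h):
 *
 *	rte_tm_shaper_profile_add(port_id, NEW_SP_ID, &params, &err);
 *	rte_tm_shared_shaper_add_update(port_id, SS_ID, NEW_SP_ID, &err);
 *
 * With the hierarchy frozen, the second call lands in the update path
 * above and reprograms the subport TC rate in the live scheduler.
 */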
/* Traffic manager shared shaper delete */
static int
pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
	uint32_t shared_shaper_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper *ss;

	/* Check existing */
	ss = tm_shared_shaper_search(dev, shared_shaper_id);
	if (ss == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (ss->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
	p->soft.tm.h.n_shared_shapers--;
	free(ss);

	return 0;
}
static int
wred_profile_check(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct tm_wred_profile *wp;
	enum rte_tm_color color;

	/* WRED profile ID must not be NONE. */
	if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* WRED profile must not exist. */
	wp = tm_wred_profile_search(dev, wred_profile_id);
	if (wp)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* WRED profile must be in packet mode */
	if (profile->packet_mode == 0)
		return -rte_tm_error_set(error,
			ENOTSUP,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(ENOTSUP));

	/* min_th <= max_th, max_th > 0 */
	for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
		uint32_t min_th = profile->red_params[color].min_th;
		uint32_t max_th = profile->red_params[color].max_th;

		if (min_th > max_th ||
			max_th == 0 ||
			min_th > UINT16_MAX ||
			max_th > UINT16_MAX)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_WRED_PROFILE,
				NULL,
				rte_strerror(EINVAL));
	}

	return 0;
}
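/* Example of per-color thresholds that pass the loop above (packet
 * mode), with earlier drop onset for non-green packets; all values are
 * illustrative:
 *
 *	.red_params[RTE_TM_GREEN]  = {.min_th = 48, .max_th = 64,
 *		.maxp_inv = 10, .wq_log2 = 9},
 *	.red_params[RTE_TM_YELLOW] = {.min_th = 40, .max_th = 64,
 *		.maxp_inv = 10, .wq_log2 = 9},
 *	.red_params[RTE_TM_RED]    = {.min_th = 32, .max_th = 64,
 *		.maxp_inv = 10, .wq_log2 = 9},
 */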
/* Traffic manager WRED profile add */
static int
pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
	struct tm_wred_profile *wp;
	int status;

	/* Check input params */
	status = wred_profile_check(dev, wred_profile_id, profile, error);
	if (status)
		return status;

	/* Memory allocation */
	wp = calloc(1, sizeof(struct tm_wred_profile));
	if (wp == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	wp->wred_profile_id = wred_profile_id;
	memcpy(&wp->params, profile, sizeof(wp->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(wpl, wp, node);
	p->soft.tm.h.n_wred_profiles++;

	return 0;
}
/* Traffic manager WRED profile delete */
static int
pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile *wp;

	/* Check existing */
	wp = tm_wred_profile_search(dev, wred_profile_id);
	if (wp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (wp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
	p->soft.tm.h.n_wred_profiles--;
	free(wp);

	return 0;
}
static int
node_add_check_port(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
		params->shaper_profile_id);

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_subport(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_pipe(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight __rte_unused,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 4 */
	if (params->nonleaf.n_sp_priorities !=
		RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* WFQ mode must be byte mode */
	if (params->nonleaf.wfq_weight_mode != NULL &&
		params->nonleaf.wfq_weight_mode[0] != 0 &&
		params->nonleaf.wfq_weight_mode[1] != 0 &&
		params->nonleaf.wfq_weight_mode[2] != 0 &&
		params->nonleaf.wfq_weight_mode[3] != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_tc(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority __rte_unused,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Single valid shared shaper */
	if (params->n_shared_shapers > 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	if (params->n_shared_shapers == 1 &&
		(params->shared_shaper_id == NULL ||
		(!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_queue(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight __rte_unused,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: leaf */
	if (node_id >= p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* No shaper */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Congestion management must not be head drop */
	if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
			NULL,
			rte_strerror(EINVAL));

	/* Congestion management set to WRED */
	if (params->leaf.cman == RTE_TM_CMAN_WRED) {
		uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
		struct tm_wred_profile *wp = tm_wred_profile_search(dev,
			wred_profile_id);

		/* WRED profile (for private WRED context) must be valid */
		if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
			wp == NULL)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
				NULL,
				rte_strerror(EINVAL));

		/* No shared WRED contexts */
		if (params->leaf.wred.n_shared_wred_contexts != 0)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_QUEUE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
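/* Summary of the per-level constraints enforced above (the generic
 * 0..3 priority and 1..255 weight bounds from node_add_check() apply
 * everywhere):
 *
 *	level     node_id        priority  weight   shaper   shared shapers
 *	port      >= n_queues    0         1        private  none
 *	subport   >= n_queues    0         1        private  none
 *	pipe      >= n_queues    0         1..255   private  none
 *	tc        >= n_queues    0..3      1        private  0 or 1
 *	queue     <  n_queues    0         1..255   none     none
 */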
static int
node_add_check(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct tm_node *pn;
	uint32_t level;
	int status;

	/* node_id, parent_node_id:
	 *    -node_id must not be RTE_TM_NODE_ID_NULL
	 *    -node_id must not be in use
	 *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
	 *        -root node must not exist
	 *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
	 *        -parent_node_id must be valid
	 */
	if (node_id == RTE_TM_NODE_ID_NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	if (tm_node_search(dev, node_id))
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EEXIST));

	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		pn = NULL;
		if (tm_root_node_present(dev))
			return -rte_tm_error_set(error,
				EEXIST,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EEXIST));
	} else {
		pn = tm_node_search(dev, parent_node_id);
		if (pn == NULL)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EINVAL));
	}

	/* priority: must be 0 .. 3 */
	if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* weight: must be 1 .. 255 */
	if (weight == 0 || weight >= UINT8_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* level_id: if valid, then
	 *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
	 *        -level_id must be zero
	 *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
	 *        -level_id must be parent level ID plus one
	 */
	level = (pn == NULL) ? 0 : pn->level + 1;
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* params: must not be NULL */
	if (params == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS,
			NULL,
			rte_strerror(EINVAL));

	/* params: per level checks */
	switch (level) {
	case TM_NODE_LEVEL_PORT:
		status = node_add_check_port(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		status = node_add_check_subport(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_PIPE:
		status = node_add_check_pipe(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_TC:
		status = node_add_check_tc(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_QUEUE:
		status = node_add_check_queue(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));
	}

	return 0;
}
/* Traffic manager node add */
static int
pmd_tm_node_add(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;
	uint32_t i;
	int status;

	/* Checks */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	status = node_add_check(dev, node_id, parent_node_id, priority, weight,
		level_id, params, error);
	if (status)
		return status;

	/* Memory allocation */
	n = calloc(1, sizeof(struct tm_node));
	if (n == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	n->node_id = node_id;
	n->parent_node_id = parent_node_id;
	n->priority = priority;
	n->weight = weight;

	if (parent_node_id != RTE_TM_NODE_ID_NULL) {
		n->parent_node = tm_node_search(dev, parent_node_id);
		n->level = n->parent_node->level + 1;
	}

	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
		n->shaper_profile = tm_shaper_profile_search(dev,
			params->shaper_profile_id);

	if (n->level == TM_NODE_LEVEL_QUEUE &&
		params->leaf.cman == RTE_TM_CMAN_WRED)
		n->wred_profile = tm_wred_profile_search(dev,
			params->leaf.wred.wred_profile_id);

	memcpy(&n->params, params, sizeof(n->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(nl, n, node);
	p->soft.tm.h.n_nodes++;

	/* Update dependencies */
	if (n->parent_node)
		n->parent_node->n_children++;

	if (n->shaper_profile)
		n->shaper_profile->n_users++;

	for (i = 0; i < params->n_shared_shapers; i++) {
		struct tm_shared_shaper *ss;

		ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
		ss->n_users++;
	}

	if (n->wred_profile)
		n->wred_profile->n_users++;

	p->soft.tm.h.n_tm_nodes[n->level]++;

	return 0;
}
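/* Sketch of hierarchy construction through the rte_tm API; the IDs are
 * illustrative, and non-leaf IDs must be >= n_queues (see the checks
 * above):
 *
 *	rte_tm_node_add(port_id, ROOT_ID, RTE_TM_NODE_ID_NULL, 0, 1,
 *		RTE_TM_NODE_LEVEL_ID_ANY, &port_params, &err);
 *	rte_tm_node_add(port_id, SUBPORT_ID, ROOT_ID, 0, 1,
 *		RTE_TM_NODE_LEVEL_ID_ANY, &subport_params, &err);
 *	rte_tm_node_add(port_id, PIPE_ID, SUBPORT_ID, 0, 1,
 *		RTE_TM_NODE_LEVEL_ID_ANY, &pipe_params, &err);
 *
 * followed by four TC nodes per pipe (priority 0..3) and four queue
 * nodes per TC, the queues using leaf node IDs 0 .. n_queues - 1.
 */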
/* Traffic manager node delete */
static int
pmd_tm_node_delete(struct rte_eth_dev *dev,
	uint32_t node_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node *n;
	uint32_t i;

	/* Check hierarchy changes are currently allowed */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Check existing */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (n->n_children)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Update dependencies */
	p->soft.tm.h.n_tm_nodes[n->level]--;

	if (n->wred_profile)
		n->wred_profile->n_users--;

	for (i = 0; i < n->params.n_shared_shapers; i++) {
		struct tm_shared_shaper *ss;

		ss = tm_shared_shaper_search(dev,
			n->params.shared_shaper_id[i]);
		ss->n_users--;
	}

	if (n->shaper_profile)
		n->shaper_profile->n_users--;

	if (n->parent_node)
		n->parent_node->n_children--;

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
	p->soft.tm.h.n_nodes--;
	free(n);

	return 0;
}
static void
pipe_profile_build(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct rte_sched_pipe_params *pp)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_node *nt, *nq;

	memset(pp, 0, sizeof(*pp));

	/* Token Bucket (TB) */
	pp->tb_rate = np->shaper_profile->params.peak.rate;
	pp->tb_size = np->shaper_profile->params.peak.size;

	/* Traffic Class (TC) */
	pp->tc_period = PIPE_TC_PERIOD;

#ifdef RTE_SCHED_SUBPORT_TC_OV
	pp->tc_ov_weight = np->weight;
#endif

	TAILQ_FOREACH(nt, nl, node) {
		uint32_t queue_id = 0;

		if (nt->level != TM_NODE_LEVEL_TC ||
			nt->parent_node_id != np->node_id)
			continue;

		pp->tc_rate[nt->priority] =
			nt->shaper_profile->params.peak.rate;

		/* Queue */
		TAILQ_FOREACH(nq, nl, node) {
			uint32_t pipe_queue_id;

			if (nq->level != TM_NODE_LEVEL_QUEUE ||
				nq->parent_node_id != nt->node_id)
				continue;

			pipe_queue_id = nt->priority *
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
			pp->wrr_weights[pipe_queue_id] = nq->weight;

			queue_id++;
		}
	}
}
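/* Queue indexing example: with 4 queues per TC, the queue at position
 * queue_id = 2 under TC priority 3 lands at pipe queue 3 * 4 + 2 = 14,
 * i.e. pp->wrr_weights[] is laid out TC-major across the 16 pipe queues.
 */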
static int
pipe_profile_free_exists(struct rte_eth_dev *dev,
	uint32_t *pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;

	if (t->n_pipe_profiles < RTE_SCHED_PIPE_PROFILES_PER_PORT) {
		*pipe_profile_id = t->n_pipe_profiles;
		return 1;
	}

	return 0;
}

static int
pipe_profile_exists(struct rte_eth_dev *dev,
	struct rte_sched_pipe_params *pp,
	uint32_t *pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;
	uint32_t i;

	for (i = 0; i < t->n_pipe_profiles; i++)
		if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
			if (pipe_profile_id)
				*pipe_profile_id = i;
			return 1;
		}

	return 0;
}
static void
pipe_profile_install(struct rte_eth_dev *dev,
	struct rte_sched_pipe_params *pp,
	uint32_t pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;

	memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
	t->n_pipe_profiles++;
}

static void
pipe_profile_mark(struct rte_eth_dev *dev,
	uint32_t subport_id,
	uint32_t pipe_id,
	uint32_t pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_params *t = &p->soft.tm.params;
	uint32_t n_pipes_per_subport, pos;

	n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
	pos = subport_id * n_pipes_per_subport + pipe_id;

	t->pipe_to_profile[pos] = pipe_profile_id;
}
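/* pipe_to_profile[] is a flattened (subport, pipe) -> profile map; it
 * lets pipes with identical parameters share one installed rte_sched
 * profile, since at most RTE_SCHED_PIPE_PROFILES_PER_PORT distinct
 * profiles can exist per port.
 */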
static struct rte_sched_pipe_params *
pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_params *t = &p->soft.tm.params;
	uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

	uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
	uint32_t pipe_profile_id = t->pipe_to_profile[pos];

	return &t->pipe_profiles[pipe_profile_id];
}
static int
pipe_profiles_generate(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_node *ns, *np;
	uint32_t subport_id;

	/* Objective: Fill in the following fields in struct tm_params:
	 *    - pipe_profiles
	 *    - n_pipe_profiles
	 *    - pipe_to_profile
	 */
	subport_id = 0;
	TAILQ_FOREACH(ns, nl, node) {
		uint32_t pipe_id;

		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		pipe_id = 0;
		TAILQ_FOREACH(np, nl, node) {
			struct rte_sched_pipe_params pp;
			uint32_t pos;

			if (np->level != TM_NODE_LEVEL_PIPE ||
				np->parent_node_id != ns->node_id)
				continue;

			pipe_profile_build(dev, np, &pp);

			if (!pipe_profile_exists(dev, &pp, &pos)) {
				if (!pipe_profile_free_exists(dev, &pos))
					return -1;

				pipe_profile_install(dev, &pp, pos);
			}

			pipe_profile_mark(dev, subport_id, pipe_id, pos);

			pipe_id++;
		}

		subport_id++;
	}

	return 0;
}
static struct tm_wred_profile *
tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_node *nq;

	TAILQ_FOREACH(nq, nl, node) {
		if (nq->level != TM_NODE_LEVEL_QUEUE ||
			nq->parent_node->priority != tc_id)
			continue;

		return nq->wred_profile;
	}

	return NULL;
}
#ifdef RTE_SCHED_RED

static void
wred_profiles_set(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
	uint32_t tc_id;
	enum rte_tm_color color;

	for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
		for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
			struct rte_red_params *dst =
				&pp->red_params[tc_id][color];
			struct tm_wred_profile *src_wp =
				tm_tc_wred_profile_get(dev, tc_id);
			struct rte_tm_red_params *src =
				&src_wp->params.red_params[color];

			/* Copy field by field: the rte_tm and rte_sched RED
			 * parameter structs use different field widths (the
			 * thresholds are validated against UINT16_MAX in
			 * wred_profile_check() above), so a raw memcpy()
			 * would not be layout-safe.
			 */
			dst->min_th = src->min_th;
			dst->max_th = src->max_th;
			dst->maxp_inv = src->maxp_inv;
			dst->wq_log2 = src->wq_log2;
		}
}

#else

#define wred_profiles_set(dev)

#endif
static struct tm_shared_shaper *
tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
{
	return (tc_node->params.n_shared_shapers) ?
		tm_shared_shaper_search(dev,
			tc_node->params.shared_shaper_id[0]) :
		NULL;
}

static struct tm_shared_shaper *
tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
	struct tm_node *subport_node,
	uint32_t tc_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node) {
		if (n->level != TM_NODE_LEVEL_TC ||
			n->parent_node->parent_node_id !=
				subport_node->node_id ||
			n->priority != tc_id)
			continue;

		return tm_tc_shared_shaper_get(dev, n);
	}

	return NULL;
}
static int
hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_shared_shaper_list *ssl = &h->shared_shapers;
	struct tm_wred_profile_list *wpl = &h->wred_profiles;
	struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
	struct tm_shared_shaper *ss;
	uint32_t n_pipes_per_subport;

	/* Root node exists. */
	if (nr == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* There is at least one subport, max is not exceeded. */
	if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* There is at least one pipe. */
	if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Number of pipes is the same for all subports. Maximum number of
	 * pipes per subport is not exceeded.
	 */
	n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

	if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	TAILQ_FOREACH(ns, nl, node) {
		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		if (ns->n_children != n_pipes_per_subport)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Each pipe has exactly 4 TCs, with exactly one TC for each priority */
	TAILQ_FOREACH(np, nl, node) {
		uint32_t mask = 0, mask_expected =
			RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
				uint32_t);

		if (np->level != TM_NODE_LEVEL_PIPE)
			continue;

		if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));

		TAILQ_FOREACH(nt, nl, node) {
			if (nt->level != TM_NODE_LEVEL_TC ||
				nt->parent_node_id != np->node_id)
				continue;

			mask |= 1 << nt->priority;
		}

		if (mask != mask_expected)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Each TC has exactly 4 packet queues. */
	TAILQ_FOREACH(nt, nl, node) {
		if (nt->level != TM_NODE_LEVEL_TC)
			continue;

		if (nt->n_children != RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
	}

	/*
	 * Shared shapers:
	 *    -For each TC #i, all pipes in the same subport use the same
	 *     shared shaper (or no shared shaper) for their TC#i.
	 *    -Each shared shaper needs to have at least one user. All its
	 *     users have to be TC nodes with the same priority and the same
	 *     subport.
	 */
	TAILQ_FOREACH(ns, nl, node) {
		struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
		uint32_t id;

		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
			s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);

		TAILQ_FOREACH(nt, nl, node) {
			struct tm_shared_shaper *subport_ss, *tc_ss;

			if (nt->level != TM_NODE_LEVEL_TC ||
				nt->parent_node->parent_node_id !=
					ns->node_id)
				continue;

			subport_ss = s[nt->priority];
			tc_ss = tm_tc_shared_shaper_get(dev, nt);

			if (subport_ss == NULL && tc_ss == NULL)
				continue;

			if ((subport_ss == NULL && tc_ss != NULL) ||
				(subport_ss != NULL && tc_ss == NULL) ||
				subport_ss->shared_shaper_id !=
					tc_ss->shared_shaper_id)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));
		}
	}

	TAILQ_FOREACH(ss, ssl, node) {
		struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
		uint32_t n_users = 0;

		TAILQ_FOREACH(nt, nl, node) {
			if (nt->level != TM_NODE_LEVEL_TC ||
				nt->priority != nt_any->priority ||
				nt->parent_node->parent_node_id !=
					nt_any->parent_node->parent_node_id)
				continue;

			n_users++;
		}

		if (ss->n_users == 0 || ss->n_users != n_users)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Not too many pipe profiles. */
	if (pipe_profiles_generate(dev))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	/*
	 * WRED (when used, i.e. at least one WRED profile defined):
	 *    -Each WRED profile must have at least one user.
	 *    -All leaf nodes must have their private WRED context enabled.
	 *    -For each TC #i, all leaf nodes must use the same WRED profile
	 *     for their private WRED context.
	 */
	if (h->n_wred_profiles) {
		struct tm_wred_profile *wp;
		struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
		uint32_t id;

		TAILQ_FOREACH(wp, wpl, node)
			if (wp->n_users == 0)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));

		for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
			w[id] = tm_tc_wred_profile_get(dev, id);

			if (w[id] == NULL)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));
		}

		TAILQ_FOREACH(nq, nl, node) {
			uint32_t id;

			if (nq->level != TM_NODE_LEVEL_QUEUE)
				continue;

			id = nq->parent_node->priority;

			if (nq->wred_profile == NULL ||
				nq->wred_profile->wred_profile_id !=
					w[id]->wred_profile_id)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));
		}
	}

	return 0;
}
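/* Assuming the 4 TCs/pipe x 4 queues/TC layout, the smallest hierarchy
 * that passes all checks above is 1 root + 1 subport + 1 pipe + 4 TCs +
 * 16 queues = 23 nodes.
 */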
static void
hierarchy_blueprints_create(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;
	struct tm_hierarchy *h = &p->soft.tm.h;

	struct tm_node_list *nl = &h->nodes;
	struct tm_node *root = tm_root_node_present(dev), *n;

	uint32_t subport_id;

	t->port_params = (struct rte_sched_port_params) {
		.name = dev->data->name,
		.socket = dev->data->numa_node,
		.rate = root->shaper_profile->params.peak.rate,
		.mtu = dev->data->mtu,
		.frame_overhead =
			root->shaper_profile->params.pkt_length_adjust,
		.n_subports_per_port = root->n_children,
		.n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
			h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
		.qsize = {p->params.tm.qsize[0],
			p->params.tm.qsize[1],
			p->params.tm.qsize[2],
			p->params.tm.qsize[3],
		},
		.pipe_profiles = t->pipe_profiles,
		.n_pipe_profiles = t->n_pipe_profiles,
	};

	wred_profiles_set(dev);

	subport_id = 0;
	TAILQ_FOREACH(n, nl, node) {
		uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
		uint32_t i;

		if (n->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
			struct tm_shared_shaper *ss;
			struct tm_shaper_profile *sp;

			ss = tm_subport_tc_shared_shaper_get(dev, n, i);
			sp = (ss) ? tm_shaper_profile_search(dev,
				ss->shaper_profile_id) :
				n->shaper_profile;
			tc_rate[i] = sp->params.peak.rate;
		}

		t->subport_params[subport_id] =
			(struct rte_sched_subport_params) {
				.tb_rate = n->shaper_profile->params.peak.rate,
				.tb_size = n->shaper_profile->params.peak.size,

				.tc_rate = {tc_rate[0],
					tc_rate[1],
					tc_rate[2],
					tc_rate[3],
				},
				.tc_period = SUBPORT_TC_PERIOD,
			};

		subport_id++;
	}
}
/* Traffic manager hierarchy commit */
static int
pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
	int clear_on_fail,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	int status;

	/* Checks */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	status = hierarchy_commit_check(dev, error);
	if (status) {
		if (clear_on_fail) {
			tm_hierarchy_uninit(p);
			tm_hierarchy_init(p);
		}

		return status;
	}

	/* Create blueprints */
	hierarchy_blueprints_create(dev);

	/* Freeze hierarchy */
	p->soft.tm.hierarchy_frozen = 1;

	return 0;
}
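
/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * commits the hierarchy it has built through the generic rte_tm API, which
 * dispatches to pmd_tm_hierarchy_commit() above. The port_id value and the
 * error handling style are assumptions of this example.
 */
#if 0
#include <stdio.h>
#include <rte_tm.h>

static int
app_commit_hierarchy(uint16_t port_id)
{
	struct rte_tm_error error = { 0 };

	/* Ask the driver to roll back all hierarchy objects on failure. */
	int status = rte_tm_hierarchy_commit(port_id,
		1 /* clear_on_fail */,
		&error);
	if (status)
		printf("TM commit failed: error type %d (%s)\n",
			error.type,
			error.message ? error.message : "no message");

	return status;
}
#endif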
#ifdef RTE_SCHED_SUBPORT_TC_OV

static int
update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tc_ov_weight = (uint8_t)weight;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is already part of the existing set
	 * of pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	np->weight = weight;

	return 0;
}

#endif
static int
update_queue_weight(struct rte_eth_dev *dev,
	struct tm_node *nq, uint32_t weight)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t queue_id = tm_node_queue_id(dev, nq);

	struct tm_node *nt = nq->parent_node;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	uint32_t pipe_queue_id =
		tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.wrr_weights[pipe_queue_id] = (uint8_t)weight;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is already part of the existing set
	 * of pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	nq->weight = weight;

	return 0;
}
/* Traffic manager node parent update */
static int
pmd_tm_node_parent_update(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	struct rte_tm_error *error)
{
	struct tm_node *n;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 || tm_used(dev) == 0)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Parent node must be the same */
	if (n->parent_node_id != parent_node_id)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be the same */
	if (n->priority != priority)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 .. 255 */
	if (weight == 0 || weight > UINT8_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	case TM_NODE_LEVEL_SUBPORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	case TM_NODE_LEVEL_PIPE:
#ifdef RTE_SCHED_SUBPORT_TC_OV
		if (update_pipe_weight(dev, n, weight))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
#else
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
#endif

	case TM_NODE_LEVEL_TC:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	case TM_NODE_LEVEL_QUEUE:
		/* fall-through */
	default:
		if (update_queue_weight(dev, n, weight))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	}
}
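
/*
 * Usage sketch (illustrative only, not part of the driver): with this PMD,
 * rte_tm_node_parent_update() can only change the WFQ weight of a pipe or
 * queue node; the parent and priority must be passed back unchanged, as
 * enforced above. All identifiers below are assumptions of this example.
 */
#if 0
#include <rte_tm.h>

static int
app_set_queue_weight(uint16_t port_id,
	uint32_t queue_node_id,
	uint32_t tc_node_id, /* current parent of the queue node */
	uint32_t new_weight) /* 1 .. 255 */
{
	struct rte_tm_error error = { 0 };

	/* Same parent, same priority as at node_add time; new weight only. */
	return rte_tm_node_parent_update(port_id,
		queue_node_id,
		tc_node_id,
		0 /* priority used when the node was added */,
		new_weight,
		&error);
}
#endif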
static int
update_subport_rate(struct rte_eth_dev *dev,
	struct tm_node *ns,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_subport_params subport_params;

	/* Derive new subport configuration. */
	memcpy(&subport_params,
		&p->soft.tm.params.subport_params[subport_id],
		sizeof(subport_params));
	subport_params.tb_rate = sp->params.peak.rate;
	subport_params.tb_size = sp->params.peak.size;

	/* Update the subport configuration. */
	if (rte_sched_subport_config(p->soft.tm.sched, subport_id,
		&subport_params))
		return -1;

	/* Commit changes. */
	ns->shaper_profile->n_users--;

	ns->shaper_profile = sp;
	ns->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	memcpy(&p->soft.tm.params.subport_params[subport_id],
		&subport_params,
		sizeof(subport_params));

	return 0;
}
static int
update_pipe_rate(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tb_rate = sp->params.peak.rate;
	profile1.tb_size = sp->params.peak.size;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is already part of the existing set
	 * of pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	np->shaper_profile->n_users--;
	np->shaper_profile = sp;
	np->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	return 0;
}
static int
update_tc_rate(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tc_rate[tc_id] = sp->params.peak.rate;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is already part of the existing set
	 * of pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	nt->shaper_profile->n_users--;
	nt->shaper_profile = sp;
	nt->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	return 0;
}
/* Traffic manager node shaper update */
static int
pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct tm_node *n;
	struct tm_shaper_profile *sp;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 || tm_used(dev) == 0)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper profile must be valid. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	case TM_NODE_LEVEL_SUBPORT:
		if (update_subport_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_PIPE:
		if (update_pipe_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_TC:
		if (update_tc_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_QUEUE:
		/* fall-through */
	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));
	}
}
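
/*
 * Usage sketch (illustrative only, not part of the driver): a runtime rate
 * change is expressed by adding a shaper profile with the new rate and then
 * pointing the node at it; pmd_tm_node_shaper_update() above maps this onto
 * rte_sched_subport_config()/rte_sched_pipe_config(). For pipe and TC nodes
 * the resulting pipe profile must already exist, as noted above. All
 * identifiers below are assumptions of this example.
 */
#if 0
#include <rte_tm.h>

static int
app_set_pipe_rate(uint16_t port_id,
	uint32_t pipe_node_id,
	uint32_t new_profile_id,
	uint64_t rate_bytes_per_sec,
	uint64_t bucket_size_bytes)
{
	struct rte_tm_error error = { 0 };
	struct rte_tm_shaper_params params = {
		.peak = {
			.rate = rate_bytes_per_sec,
			.size = bucket_size_bytes,
		},
		.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
	};

	/* Register the new profile, then re-point the node at it. */
	int status = rte_tm_shaper_profile_add(port_id,
		new_profile_id, &params, &error);
	if (status)
		return status;

	return rte_tm_node_shaper_update(port_id,
		pipe_node_id, new_profile_id, &error);
}
#endif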
static inline uint32_t
tm_port_queue_id(struct rte_eth_dev *dev,
	uint32_t port_subport_id,
	uint32_t subport_pipe_id,
	uint32_t pipe_tc_id,
	uint32_t tc_queue_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

	uint32_t port_pipe_id =
		port_subport_id * n_pipes_per_subport + subport_pipe_id;
	uint32_t port_tc_id =
		port_pipe_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + pipe_tc_id;
	uint32_t port_queue_id =
		port_tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_queue_id;

	return port_queue_id;
}
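
/*
 * Worked example (illustrative numbers only): with 8 pipes per subport,
 * 4 TCs per pipe and 4 queues per TC, the queue identified by
 * (subport 1, pipe 2, tc 3, queue 1) maps to:
 *   port_pipe_id  = 1 * 8 + 2  = 10
 *   port_tc_id    = 10 * 4 + 3 = 43
 *   port_queue_id = 43 * 4 + 1 = 173
 */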
static int
read_port_stats(struct rte_eth_dev *dev,
	struct tm_node *nr,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
	uint32_t subport_id;

	for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
		struct rte_sched_subport_stats s;
		uint32_t tc_ov, id;

		/* Stats read */
		int status = rte_sched_subport_read_stats(
			p->soft.tm.sched,
			subport_id,
			&s,
			&tc_ov);
		if (status)
			return status;

		/* Stats accumulate */
		for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
			nr->stats.n_pkts +=
				s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
			nr->stats.n_bytes +=
				s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
			nr->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
				s.n_pkts_tc_dropped[id];
			nr->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
				s.n_bytes_tc_dropped[id];
		}
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &nr->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&nr->stats, 0, sizeof(nr->stats));

	return 0;
}
static int
read_subport_stats(struct rte_eth_dev *dev,
	struct tm_node *ns,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	struct rte_sched_subport_stats s;
	uint32_t tc_ov, tc_id;

	/* Stats read */
	int status = rte_sched_subport_read_stats(
		p->soft.tm.sched,
		subport_id,
		&s,
		&tc_ov);
	if (status)
		return status;

	/* Stats accumulate */
	for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
		ns->stats.n_pkts +=
			s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
		ns->stats.n_bytes +=
			s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
		ns->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
			s.n_pkts_tc_dropped[tc_id];
		ns->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
			s.n_bytes_tc_dropped[tc_id];
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &ns->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&ns->stats, 0, sizeof(ns->stats));

	return 0;
}
static int
read_pipe_stats(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;

	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	uint32_t i;

	/* Stats read */
	for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
		struct rte_sched_queue_stats s;
		uint16_t qlen;

		uint32_t qid = tm_port_queue_id(dev,
			subport_id,
			pipe_id,
			i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);

		int status = rte_sched_queue_read_stats(
			p->soft.tm.sched,
			qid,
			&s,
			&qlen);
		if (status)
			return status;

		/* Stats accumulate */
		np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
		np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
		np->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
		np->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
			s.n_bytes_dropped;
		np->stats.leaf.n_pkts_queued = qlen;
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &np->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&np->stats, 0, sizeof(np->stats));

	return 0;
}
static int
read_tc_stats(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;

	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	uint32_t i;

	/* Stats read */
	for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
		struct rte_sched_queue_stats s;
		uint16_t qlen;

		uint32_t qid = tm_port_queue_id(dev,
			subport_id,
			pipe_id,
			tc_id,
			i);

		int status = rte_sched_queue_read_stats(
			p->soft.tm.sched,
			qid,
			&s,
			&qlen);
		if (status)
			return status;

		/* Stats accumulate */
		nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
		nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
		nt->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
		nt->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
			s.n_bytes_dropped;
		nt->stats.leaf.n_pkts_queued = qlen;
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &nt->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&nt->stats, 0, sizeof(nt->stats));

	return 0;
}
static int
read_queue_stats(struct rte_eth_dev *dev,
	struct tm_node *nq,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct rte_sched_queue_stats s;
	uint16_t qlen;

	uint32_t queue_id = tm_node_queue_id(dev, nq);

	struct tm_node *nt = nq->parent_node;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	/* Stats read */
	uint32_t qid = tm_port_queue_id(dev,
		subport_id,
		pipe_id,
		tc_id,
		queue_id);

	int status = rte_sched_queue_read_stats(
		p->soft.tm.sched,
		qid,
		&s,
		&qlen);
	if (status)
		return status;

	/* Stats accumulate */
	nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
	nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
	nq->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
	nq->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
		s.n_bytes_dropped;
	nq->stats.leaf.n_pkts_queued = qlen;

	/* Stats copy */
	if (stats)
		memcpy(stats, &nq->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_QUEUE;

	/* Stats clear */
	if (clear)
		memset(&nq->stats, 0, sizeof(nq->stats));

	return 0;
}
/* Traffic manager read stats counters for specific node */
static int
pmd_tm_node_stats_read(struct rte_eth_dev *dev,
	uint32_t node_id,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear,
	struct rte_tm_error *error)
{
	struct tm_node *n;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 || tm_used(dev) == 0)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		if (read_port_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_SUBPORT:
		if (read_subport_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_PIPE:
		if (read_pipe_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_TC:
		if (read_tc_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_QUEUE:
	default:
		if (read_queue_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	}
}
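
/*
 * Usage sketch (illustrative only, not part of the driver): reading and
 * clearing the counters of one node through the generic rte_tm API, which
 * lands in pmd_tm_node_stats_read() above. The node_id value is an
 * assumption of this example.
 */
#if 0
#include <inttypes.h>
#include <stdio.h>
#include <rte_tm.h>

static void
app_dump_node_stats(uint16_t port_id, uint32_t node_id)
{
	struct rte_tm_node_stats stats;
	struct rte_tm_error error = { 0 };
	uint64_t stats_mask = 0;

	/* Read the counters and reset them in the same call. */
	if (rte_tm_node_stats_read(port_id, node_id, &stats,
			&stats_mask, 1 /* clear */, &error) == 0)
		printf("node %u: %" PRIu64 " pkts, %" PRIu64 " bytes\n",
			node_id, stats.n_pkts, stats.n_bytes);
}
#endif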
const struct rte_tm_ops pmd_tm_ops = {
	.node_type_get = pmd_tm_node_type_get,
	.capabilities_get = pmd_tm_capabilities_get,
	.level_capabilities_get = pmd_tm_level_capabilities_get,
	.node_capabilities_get = pmd_tm_node_capabilities_get,

	.wred_profile_add = pmd_tm_wred_profile_add,
	.wred_profile_delete = pmd_tm_wred_profile_delete,
	.shared_wred_context_add_update = NULL,
	.shared_wred_context_delete = NULL,

	.shaper_profile_add = pmd_tm_shaper_profile_add,
	.shaper_profile_delete = pmd_tm_shaper_profile_delete,
	.shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
	.shared_shaper_delete = pmd_tm_shared_shaper_delete,

	.node_add = pmd_tm_node_add,
	.node_delete = pmd_tm_node_delete,
	.node_suspend = NULL,
	.node_resume = NULL,
	.hierarchy_commit = pmd_tm_hierarchy_commit,

	.node_parent_update = pmd_tm_node_parent_update,
	.node_shaper_update = pmd_tm_node_shaper_update,
	.node_shared_shaper_update = NULL,
	.node_stats_update = NULL,
	.node_wfq_weight_mode_update = NULL,
	.node_cman_update = NULL,
	.node_wred_context_update = NULL,
	.node_shared_wred_context_update = NULL,

	.node_stats_read = pmd_tm_node_stats_read,
};
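
/*
 * Note: applications do not call pmd_tm_ops directly; ethdev resolves it
 * through the port's tm_ops_get() callback. A minimal sketch of probing a
 * port for TM support (illustrative only):
 */
#if 0
#include <rte_tm.h>

static int
app_tm_supported(uint16_t port_id)
{
	struct rte_tm_capabilities cap;
	struct rte_tm_error error = { 0 };

	/* Fails (e.g. -ENOTSUP) when the port exposes no TM ops. */
	return rte_tm_capabilities_get(port_id, &cap, &error) == 0;
}
#endif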