1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
9 #include <rte_malloc.h>
10 #include <rte_string_fns.h>
12 #include "rte_eth_softnic_internals.h"
13 #include "rte_eth_softnic.h"
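/* Traffic class rate enforcement periods (in milliseconds) used when building
 * the rte_sched subport and pipe parameters at hierarchy commit time.
 */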
15 #define SUBPORT_TC_PERIOD 10
16 #define PIPE_TC_PERIOD 40
19 softnic_tmgr_init(struct pmd_internals *p)
21 TAILQ_INIT(&p->tmgr_port_list);
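/* Remove and free all traffic manager ports, including their underlying
 * rte_sched port instances.
 */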
27 softnic_tmgr_free(struct pmd_internals *p)
30 struct softnic_tmgr_port *tmgr_port;
32 tmgr_port = TAILQ_FIRST(&p->tmgr_port_list);
33 if (tmgr_port == NULL)
36 TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
37 rte_sched_port_free(tmgr_port->s);
42 struct softnic_tmgr_port *
43 softnic_tmgr_port_find(struct pmd_internals *p,
46 struct softnic_tmgr_port *tmgr_port;
51 TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node)
52 if (strcmp(tmgr_port->name, name) == 0)
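/* Instantiate an rte_sched port from the blueprints built at hierarchy commit
 * time (port, subport and pipe parameters) and register it in the tmgr port
 * list under the given name.
 */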
58 struct softnic_tmgr_port *
59 softnic_tmgr_port_create(struct pmd_internals *p,
62 struct softnic_tmgr_port *tmgr_port;
63 struct tm_params *t = &p->soft.tm.params;
64 struct rte_sched_port *sched;
65 uint32_t n_subports, subport_id;
67 /* Check input params */
69 softnic_tmgr_port_find(p, name))
76 /* Is hierarchy frozen? */
77 if (p->soft.tm.hierarchy_frozen == 0)
81 sched = rte_sched_port_config(&t->port_params);
86 n_subports = t->port_params.n_subports_per_port;
87 for (subport_id = 0; subport_id < n_subports; subport_id++) {
88 uint32_t n_pipes_per_subport =
89 t->subport_params[subport_id].n_pipes_per_subport_enabled;
93 status = rte_sched_subport_config(sched,
95 &t->subport_params[subport_id]);
97 rte_sched_port_free(sched);
102 for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
103 int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id;
104 int profile_id = t->pipe_to_profile[pos];
109 status = rte_sched_pipe_config(sched,
114 rte_sched_port_free(sched);
120 /* Node allocation */
121 tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port));
122 if (tmgr_port == NULL) {
123 rte_sched_port_free(sched);
128 strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
129 tmgr_port->s = sched;
131 /* Node add to list */
132 TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node);
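/* Return the rte_sched port instance of the tmgr port named "TMGR"; this is
 * the instance operated on by the run-time update helpers below.
 */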
137 static struct rte_sched_port *
138 SCHED(struct pmd_internals *p)
140 struct softnic_tmgr_port *tmgr_port;
142 tmgr_port = softnic_tmgr_port_find(p, "TMGR");
143 if (tmgr_port == NULL)
150 tm_hierarchy_init(struct pmd_internals *p)
152 memset(&p->soft.tm, 0, sizeof(p->soft.tm));
154 /* Initialize shaper profile list */
155 TAILQ_INIT(&p->soft.tm.h.shaper_profiles);
157 /* Initialize shared shaper list */
158 TAILQ_INIT(&p->soft.tm.h.shared_shapers);
160 /* Initialize wred profile list */
161 TAILQ_INIT(&p->soft.tm.h.wred_profiles);
163 /* Initialize TM node list */
164 TAILQ_INIT(&p->soft.tm.h.nodes);
168 tm_hierarchy_free(struct pmd_internals *p)
170 /* Remove all nodes */
172 struct tm_node *tm_node;
174 tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
178 TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
182 /* Remove all WRED profiles */
184 struct tm_wred_profile *wred_profile;
186 wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
187 if (wred_profile == NULL)
190 TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
194 /* Remove all shared shapers */
196 struct tm_shared_shaper *shared_shaper;
198 shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
199 if (shared_shaper == NULL)
202 TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
206 /* Remove all shaper profiles */
208 struct tm_shaper_profile *shaper_profile;
210 shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
211 if (shaper_profile == NULL)
214 TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
215 shaper_profile, node);
216 free(shaper_profile);
219 tm_hierarchy_init(p);
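/* Linear lookup helpers: search the hierarchy lists by object ID. */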
222 static struct tm_shaper_profile *
223 tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
225 struct pmd_internals *p = dev->data->dev_private;
226 struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
227 struct tm_shaper_profile *sp;
229 TAILQ_FOREACH(sp, spl, node)
230 if (shaper_profile_id == sp->shaper_profile_id)
236 static struct tm_shared_shaper *
237 tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
239 struct pmd_internals *p = dev->data->dev_private;
240 struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
241 struct tm_shared_shaper *ss;
243 TAILQ_FOREACH(ss, ssl, node)
244 if (shared_shaper_id == ss->shared_shaper_id)
250 static struct tm_wred_profile *
251 tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
253 struct pmd_internals *p = dev->data->dev_private;
254 struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
255 struct tm_wred_profile *wp;
257 TAILQ_FOREACH(wp, wpl, node)
258 if (wred_profile_id == wp->wred_profile_id)
264 static struct tm_node *
265 tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
267 struct pmd_internals *p = dev->data->dev_private;
268 struct tm_node_list *nl = &p->soft.tm.h.nodes;
271 TAILQ_FOREACH(n, nl, node)
272 if (n->node_id == node_id)
278 static struct tm_node *
279 tm_root_node_present(struct rte_eth_dev *dev)
281 struct pmd_internals *p = dev->data->dev_private;
282 struct tm_node_list *nl = &p->soft.tm.h.nodes;
285 TAILQ_FOREACH(n, nl, node)
286 if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
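/* Map TM nodes to rte_sched indices: subport and pipe IDs are obtained by
 * counting preceding nodes at the same level (pipes within the same subport),
 * the TC ID is the node priority, and the queue ID is the position among the
 * queues of the same parent TC.
 */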
293 tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
295 struct pmd_internals *p = dev->data->dev_private;
296 struct tm_node_list *nl = &p->soft.tm.h.nodes;
301 TAILQ_FOREACH(ns, nl, node) {
302 if (ns->level != TM_NODE_LEVEL_SUBPORT)
305 if (ns->node_id == subport_node->node_id)
315 tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
317 struct pmd_internals *p = dev->data->dev_private;
318 struct tm_node_list *nl = &p->soft.tm.h.nodes;
323 TAILQ_FOREACH(np, nl, node) {
324 if (np->level != TM_NODE_LEVEL_PIPE ||
325 np->parent_node_id != pipe_node->parent_node_id)
328 if (np->node_id == pipe_node->node_id)
338 tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
340 return tc_node->priority;
344 tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
346 struct pmd_internals *p = dev->data->dev_private;
347 struct tm_node_list *nl = &p->soft.tm.h.nodes;
352 TAILQ_FOREACH(nq, nl, node) {
353 if (nq->level != TM_NODE_LEVEL_QUEUE ||
354 nq->parent_node_id != queue_node->parent_node_id)
357 if (nq->node_id == queue_node->node_id)
367 tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
369 struct pmd_internals *p = dev->data->dev_private;
370 uint32_t n_queues_max = p->params.tm.n_queues;
372 (n_queues_max * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
373 / RTE_SCHED_QUEUES_PER_PIPE;
374 uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
375 uint32_t n_subports_max = n_pipes_max;
376 uint32_t n_root_max = 1;
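/* Sketch of the arithmetic above, assuming the default rte_sched geometry
 * (16 queues and 13 traffic classes per pipe): n_queues_max = 4096 gives
 * n_tc_max = 3328, n_pipes_max = 256 and at most 256 subports.
 */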
379 case TM_NODE_LEVEL_PORT:
381 case TM_NODE_LEVEL_SUBPORT:
382 return n_subports_max;
383 case TM_NODE_LEVEL_PIPE:
385 case TM_NODE_LEVEL_TC:
387 case TM_NODE_LEVEL_QUEUE:
393 /* Traffic manager node type get */
395 pmd_tm_node_type_get(struct rte_eth_dev *dev,
398 struct rte_tm_error *error)
400 struct pmd_internals *p = dev->data->dev_private;
403 return -rte_tm_error_set(error,
405 RTE_TM_ERROR_TYPE_UNSPECIFIED,
407 rte_strerror(EINVAL));
409 if (node_id == RTE_TM_NODE_ID_NULL ||
410 (tm_node_search(dev, node_id) == NULL))
411 return -rte_tm_error_set(error,
413 RTE_TM_ERROR_TYPE_NODE_ID,
415 rte_strerror(EINVAL));
417 *is_leaf = node_id < p->params.tm.n_queues;
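/* Convention used throughout this file: leaf (queue) nodes are the nodes with
 * IDs 0 .. n_queues - 1; any other node ID is non-leaf.
 */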
423 #define WRED_SUPPORTED 1
425 #define WRED_SUPPORTED 0
428 #define STATS_MASK_DEFAULT \
429 (RTE_TM_STATS_N_PKTS | \
430 RTE_TM_STATS_N_BYTES | \
431 RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
432 RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
434 #define STATS_MASK_QUEUE \
435 (STATS_MASK_DEFAULT | \
436 RTE_TM_STATS_N_PKTS_QUEUED)
438 static const struct rte_tm_capabilities tm_cap = {
439 .n_nodes_max = UINT32_MAX,
440 .n_levels_max = TM_NODE_LEVEL_MAX,
442 .non_leaf_nodes_identical = 0,
443 .leaf_nodes_identical = 1,
445 .shaper_n_max = UINT32_MAX,
446 .shaper_private_n_max = UINT32_MAX,
447 .shaper_private_dual_rate_n_max = 0,
448 .shaper_private_rate_min = 1,
449 .shaper_private_rate_max = UINT32_MAX,
451 .shaper_shared_n_max = UINT32_MAX,
452 .shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
453 .shaper_shared_n_shapers_per_node_max = 1,
454 .shaper_shared_dual_rate_n_max = 0,
455 .shaper_shared_rate_min = 1,
456 .shaper_shared_rate_max = UINT32_MAX,
458 .shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
459 .shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
461 .sched_n_children_max = UINT32_MAX,
462 .sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
463 .sched_wfq_n_children_per_group_max = UINT32_MAX,
464 .sched_wfq_n_groups_max = 1,
465 .sched_wfq_weight_max = UINT32_MAX,
467 .cman_wred_packet_mode_supported = WRED_SUPPORTED,
468 .cman_wred_byte_mode_supported = 0,
469 .cman_head_drop_supported = 0,
470 .cman_wred_context_n_max = 0,
471 .cman_wred_context_private_n_max = 0,
472 .cman_wred_context_shared_n_max = 0,
473 .cman_wred_context_shared_n_nodes_per_context_max = 0,
474 .cman_wred_context_shared_n_contexts_per_node_max = 0,
476 .mark_vlan_dei_supported = {0, 0, 0},
477 .mark_ip_ecn_tcp_supported = {0, 0, 0},
478 .mark_ip_ecn_sctp_supported = {0, 0, 0},
479 .mark_ip_dscp_supported = {0, 0, 0},
481 .dynamic_update_mask = 0,
483 .stats_mask = STATS_MASK_QUEUE,
486 /* Traffic manager capabilities get */
488 pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused,
489 struct rte_tm_capabilities *cap,
490 struct rte_tm_error *error)
493 return -rte_tm_error_set(error,
495 RTE_TM_ERROR_TYPE_CAPABILITIES,
497 rte_strerror(EINVAL));
499 memcpy(cap, &tm_cap, sizeof(*cap));
501 cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
502 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
503 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
504 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
505 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
507 cap->shaper_private_n_max =
508 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
509 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
510 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
511 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);
513 cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
514 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);
516 cap->shaper_n_max = cap->shaper_private_n_max +
517 cap->shaper_shared_n_max;
519 cap->shaper_shared_n_nodes_per_shaper_max =
520 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);
522 cap->sched_n_children_max = RTE_MAX(
523 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
524 (uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);
526 cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
529 cap->cman_wred_context_private_n_max =
530 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
532 cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
533 cap->cman_wred_context_shared_n_max;
538 static const struct rte_tm_level_capabilities tm_level_cap[] = {
539 [TM_NODE_LEVEL_PORT] = {
541 .n_nodes_nonleaf_max = 1,
542 .n_nodes_leaf_max = 0,
543 .non_leaf_nodes_identical = 1,
544 .leaf_nodes_identical = 0,
547 .shaper_private_supported = 1,
548 .shaper_private_dual_rate_supported = 0,
549 .shaper_private_rate_min = 1,
550 .shaper_private_rate_max = UINT32_MAX,
551 .shaper_shared_n_max = 0,
553 .sched_n_children_max = UINT32_MAX,
554 .sched_sp_n_priorities_max = 1,
555 .sched_wfq_n_children_per_group_max = UINT32_MAX,
556 .sched_wfq_n_groups_max = 1,
557 .sched_wfq_weight_max = 1,
559 .stats_mask = STATS_MASK_DEFAULT,
563 [TM_NODE_LEVEL_SUBPORT] = {
564 .n_nodes_max = UINT32_MAX,
565 .n_nodes_nonleaf_max = UINT32_MAX,
566 .n_nodes_leaf_max = 0,
567 .non_leaf_nodes_identical = 1,
568 .leaf_nodes_identical = 0,
571 .shaper_private_supported = 1,
572 .shaper_private_dual_rate_supported = 0,
573 .shaper_private_rate_min = 1,
574 .shaper_private_rate_max = UINT32_MAX,
575 .shaper_shared_n_max = 0,
577 .sched_n_children_max = UINT32_MAX,
578 .sched_sp_n_priorities_max = 1,
579 .sched_wfq_n_children_per_group_max = UINT32_MAX,
580 .sched_wfq_n_groups_max = 1,
581 #ifdef RTE_SCHED_SUBPORT_TC_OV
582 .sched_wfq_weight_max = UINT32_MAX,
584 .sched_wfq_weight_max = 1,
586 .stats_mask = STATS_MASK_DEFAULT,
590 [TM_NODE_LEVEL_PIPE] = {
591 .n_nodes_max = UINT32_MAX,
592 .n_nodes_nonleaf_max = UINT32_MAX,
593 .n_nodes_leaf_max = 0,
594 .non_leaf_nodes_identical = 1,
595 .leaf_nodes_identical = 0,
598 .shaper_private_supported = 1,
599 .shaper_private_dual_rate_supported = 0,
600 .shaper_private_rate_min = 1,
601 .shaper_private_rate_max = UINT32_MAX,
602 .shaper_shared_n_max = 0,
604 .sched_n_children_max =
605 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
606 .sched_sp_n_priorities_max =
607 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
608 .sched_wfq_n_children_per_group_max = 1,
609 .sched_wfq_n_groups_max = 0,
610 .sched_wfq_weight_max = 1,
612 .stats_mask = STATS_MASK_DEFAULT,
616 [TM_NODE_LEVEL_TC] = {
617 .n_nodes_max = UINT32_MAX,
618 .n_nodes_nonleaf_max = UINT32_MAX,
619 .n_nodes_leaf_max = 0,
620 .non_leaf_nodes_identical = 1,
621 .leaf_nodes_identical = 0,
624 .shaper_private_supported = 1,
625 .shaper_private_dual_rate_supported = 0,
626 .shaper_private_rate_min = 1,
627 .shaper_private_rate_max = UINT32_MAX,
628 .shaper_shared_n_max = 1,
630 .sched_n_children_max =
631 RTE_SCHED_BE_QUEUES_PER_PIPE,
632 .sched_sp_n_priorities_max = 1,
633 .sched_wfq_n_children_per_group_max =
634 RTE_SCHED_BE_QUEUES_PER_PIPE,
635 .sched_wfq_n_groups_max = 1,
636 .sched_wfq_weight_max = UINT32_MAX,
638 .stats_mask = STATS_MASK_DEFAULT,
642 [TM_NODE_LEVEL_QUEUE] = {
643 .n_nodes_max = UINT32_MAX,
644 .n_nodes_nonleaf_max = 0,
645 .n_nodes_leaf_max = UINT32_MAX,
646 .non_leaf_nodes_identical = 0,
647 .leaf_nodes_identical = 1,
650 .shaper_private_supported = 0,
651 .shaper_private_dual_rate_supported = 0,
652 .shaper_private_rate_min = 0,
653 .shaper_private_rate_max = 0,
654 .shaper_shared_n_max = 0,
656 .cman_head_drop_supported = 0,
657 .cman_wred_packet_mode_supported = WRED_SUPPORTED,
658 .cman_wred_byte_mode_supported = 0,
659 .cman_wred_context_private_supported = WRED_SUPPORTED,
660 .cman_wred_context_shared_n_max = 0,
662 .stats_mask = STATS_MASK_QUEUE,
667 /* Traffic manager level capabilities get */
669 pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
671 struct rte_tm_level_capabilities *cap,
672 struct rte_tm_error *error)
675 return -rte_tm_error_set(error,
677 RTE_TM_ERROR_TYPE_CAPABILITIES,
679 rte_strerror(EINVAL));
681 if (level_id >= TM_NODE_LEVEL_MAX)
682 return -rte_tm_error_set(error,
684 RTE_TM_ERROR_TYPE_LEVEL_ID,
686 rte_strerror(EINVAL));
688 memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));
691 case TM_NODE_LEVEL_PORT:
692 cap->nonleaf.sched_n_children_max =
693 tm_level_get_max_nodes(dev,
694 TM_NODE_LEVEL_SUBPORT);
695 cap->nonleaf.sched_wfq_n_children_per_group_max =
696 cap->nonleaf.sched_n_children_max;
699 case TM_NODE_LEVEL_SUBPORT:
700 cap->n_nodes_max = tm_level_get_max_nodes(dev,
701 TM_NODE_LEVEL_SUBPORT);
702 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
703 cap->nonleaf.sched_n_children_max =
704 tm_level_get_max_nodes(dev,
706 cap->nonleaf.sched_wfq_n_children_per_group_max =
707 cap->nonleaf.sched_n_children_max;
710 case TM_NODE_LEVEL_PIPE:
711 cap->n_nodes_max = tm_level_get_max_nodes(dev,
713 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
716 case TM_NODE_LEVEL_TC:
717 cap->n_nodes_max = tm_level_get_max_nodes(dev,
719 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
722 case TM_NODE_LEVEL_QUEUE:
724 cap->n_nodes_max = tm_level_get_max_nodes(dev,
725 TM_NODE_LEVEL_QUEUE);
726 cap->n_nodes_leaf_max = cap->n_nodes_max;
733 static const struct rte_tm_node_capabilities tm_node_cap[] = {
734 [TM_NODE_LEVEL_PORT] = {
735 .shaper_private_supported = 1,
736 .shaper_private_dual_rate_supported = 0,
737 .shaper_private_rate_min = 1,
738 .shaper_private_rate_max = UINT32_MAX,
739 .shaper_shared_n_max = 0,
742 .sched_n_children_max = UINT32_MAX,
743 .sched_sp_n_priorities_max = 1,
744 .sched_wfq_n_children_per_group_max = UINT32_MAX,
745 .sched_wfq_n_groups_max = 1,
746 .sched_wfq_weight_max = 1,
749 .stats_mask = STATS_MASK_DEFAULT,
752 [TM_NODE_LEVEL_SUBPORT] = {
753 .shaper_private_supported = 1,
754 .shaper_private_dual_rate_supported = 0,
755 .shaper_private_rate_min = 1,
756 .shaper_private_rate_max = UINT32_MAX,
757 .shaper_shared_n_max = 0,
760 .sched_n_children_max = UINT32_MAX,
761 .sched_sp_n_priorities_max = 1,
762 .sched_wfq_n_children_per_group_max = UINT32_MAX,
763 .sched_wfq_n_groups_max = 1,
764 .sched_wfq_weight_max = UINT32_MAX,
767 .stats_mask = STATS_MASK_DEFAULT,
770 [TM_NODE_LEVEL_PIPE] = {
771 .shaper_private_supported = 1,
772 .shaper_private_dual_rate_supported = 0,
773 .shaper_private_rate_min = 1,
774 .shaper_private_rate_max = UINT32_MAX,
775 .shaper_shared_n_max = 0,
778 .sched_n_children_max =
779 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
780 .sched_sp_n_priorities_max =
781 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
782 .sched_wfq_n_children_per_group_max = 1,
783 .sched_wfq_n_groups_max = 0,
784 .sched_wfq_weight_max = 1,
787 .stats_mask = STATS_MASK_DEFAULT,
790 [TM_NODE_LEVEL_TC] = {
791 .shaper_private_supported = 1,
792 .shaper_private_dual_rate_supported = 0,
793 .shaper_private_rate_min = 1,
794 .shaper_private_rate_max = UINT32_MAX,
795 .shaper_shared_n_max = 1,
798 .sched_n_children_max =
799 RTE_SCHED_BE_QUEUES_PER_PIPE,
800 .sched_sp_n_priorities_max = 1,
801 .sched_wfq_n_children_per_group_max =
802 RTE_SCHED_BE_QUEUES_PER_PIPE,
803 .sched_wfq_n_groups_max = 1,
804 .sched_wfq_weight_max = UINT32_MAX,
807 .stats_mask = STATS_MASK_DEFAULT,
810 [TM_NODE_LEVEL_QUEUE] = {
811 .shaper_private_supported = 0,
812 .shaper_private_dual_rate_supported = 0,
813 .shaper_private_rate_min = 0,
814 .shaper_private_rate_max = 0,
815 .shaper_shared_n_max = 0,
819 .cman_head_drop_supported = 0,
820 .cman_wred_packet_mode_supported = WRED_SUPPORTED,
821 .cman_wred_byte_mode_supported = 0,
822 .cman_wred_context_private_supported = WRED_SUPPORTED,
823 .cman_wred_context_shared_n_max = 0,
826 .stats_mask = STATS_MASK_QUEUE,
830 /* Traffic manager node capabilities get */
832 pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
834 struct rte_tm_node_capabilities *cap,
835 struct rte_tm_error *error)
837 struct tm_node *tm_node;
840 return -rte_tm_error_set(error,
842 RTE_TM_ERROR_TYPE_CAPABILITIES,
844 rte_strerror(EINVAL));
846 tm_node = tm_node_search(dev, node_id);
848 return -rte_tm_error_set(error,
850 RTE_TM_ERROR_TYPE_NODE_ID,
852 rte_strerror(EINVAL));
854 memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));
856 switch (tm_node->level) {
857 case TM_NODE_LEVEL_PORT:
858 cap->nonleaf.sched_n_children_max =
859 tm_level_get_max_nodes(dev,
860 TM_NODE_LEVEL_SUBPORT);
861 cap->nonleaf.sched_wfq_n_children_per_group_max =
862 cap->nonleaf.sched_n_children_max;
865 case TM_NODE_LEVEL_SUBPORT:
866 cap->nonleaf.sched_n_children_max =
867 tm_level_get_max_nodes(dev,
869 cap->nonleaf.sched_wfq_n_children_per_group_max =
870 cap->nonleaf.sched_n_children_max;
873 case TM_NODE_LEVEL_PIPE:
874 case TM_NODE_LEVEL_TC:
875 case TM_NODE_LEVEL_QUEUE:
884 shaper_profile_check(struct rte_eth_dev *dev,
885 uint32_t shaper_profile_id,
886 struct rte_tm_shaper_params *profile,
887 struct rte_tm_error *error)
889 struct tm_shaper_profile *sp;
891 /* Shaper profile ID must not be NONE. */
892 if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
893 return -rte_tm_error_set(error,
895 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
897 rte_strerror(EINVAL));
899 /* Shaper profile must not exist. */
900 sp = tm_shaper_profile_search(dev, shaper_profile_id);
902 return -rte_tm_error_set(error,
904 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
906 rte_strerror(EEXIST));
908 /* Profile must not be NULL. */
910 return -rte_tm_error_set(error,
912 RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
914 rte_strerror(EINVAL));
916 /* Peak rate: non-zero, 32-bit */
917 if (profile->peak.rate == 0 ||
918 profile->peak.rate >= UINT32_MAX)
919 return -rte_tm_error_set(error,
921 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
923 rte_strerror(EINVAL));
925 /* Peak size: non-zero, 32-bit */
926 if (profile->peak.size == 0 ||
927 profile->peak.size >= UINT32_MAX)
928 return -rte_tm_error_set(error,
930 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
932 rte_strerror(EINVAL));
934 /* Dual-rate profiles are not supported. */
935 if (profile->committed.rate != 0)
936 return -rte_tm_error_set(error,
938 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
940 rte_strerror(EINVAL));
942 /* Packet length adjust: 24 bytes */
943 if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
944 return -rte_tm_error_set(error,
946 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
948 rte_strerror(EINVAL));
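/*
 * Illustrative sketch (not part of the driver): a shaper profile that passes
 * the checks above -- single rate, non-zero 32-bit peak rate and size, and the
 * 24-byte framing overhead adjustment. The rate and size values are examples
 * only.
 *
 *	struct rte_tm_shaper_params sp = {
 *		.peak = { .rate = 1250000000, .size = 1000000 },
 *		.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
 *	};
 */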
953 /* Traffic manager shaper profile add */
955 pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
956 uint32_t shaper_profile_id,
957 struct rte_tm_shaper_params *profile,
958 struct rte_tm_error *error)
960 struct pmd_internals *p = dev->data->dev_private;
961 struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
962 struct tm_shaper_profile *sp;
965 /* Check input params */
966 status = shaper_profile_check(dev, shaper_profile_id, profile, error);
970 /* Memory allocation */
971 sp = calloc(1, sizeof(struct tm_shaper_profile));
973 return -rte_tm_error_set(error,
975 RTE_TM_ERROR_TYPE_UNSPECIFIED,
977 rte_strerror(ENOMEM));
980 sp->shaper_profile_id = shaper_profile_id;
981 memcpy(&sp->params, profile, sizeof(sp->params));
984 TAILQ_INSERT_TAIL(spl, sp, node);
985 p->soft.tm.h.n_shaper_profiles++;
990 /* Traffic manager shaper profile delete */
992 pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
993 uint32_t shaper_profile_id,
994 struct rte_tm_error *error)
996 struct pmd_internals *p = dev->data->dev_private;
997 struct tm_shaper_profile *sp;
1000 sp = tm_shaper_profile_search(dev, shaper_profile_id);
1002 return -rte_tm_error_set(error,
1004 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1006 rte_strerror(EINVAL));
1010 return -rte_tm_error_set(error,
1012 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1014 rte_strerror(EBUSY));
1016 /* Remove from list */
1017 TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
1018 p->soft.tm.h.n_shaper_profiles--;
1024 static struct tm_node *
1025 tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
1026 struct tm_shared_shaper *ss)
1028 struct pmd_internals *p = dev->data->dev_private;
1029 struct tm_node_list *nl = &p->soft.tm.h.nodes;
1032 /* Return the first TC node that uses this shared shaper */
1033 TAILQ_FOREACH(n, nl, node) {
1034 if (n->level != TM_NODE_LEVEL_TC ||
1035 n->params.n_shared_shapers == 0 ||
1036 n->params.shared_shaper_id[0] != ss->shared_shaper_id)
1046 update_subport_tc_rate(struct rte_eth_dev *dev,
1048 struct tm_shared_shaper *ss,
1049 struct tm_shaper_profile *sp_new)
1051 struct pmd_internals *p = dev->data->dev_private;
1052 uint32_t tc_id = tm_node_tc_id(dev, nt);
1054 struct tm_node *np = nt->parent_node;
1056 struct tm_node *ns = np->parent_node;
1057 uint32_t subport_id = tm_node_subport_id(dev, ns);
1059 struct rte_sched_subport_params subport_params;
1061 struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
1062 ss->shaper_profile_id);
1064 /* Derive new subport configuration. */
1065 memcpy(&subport_params,
1066 &p->soft.tm.params.subport_params[subport_id],
1067 sizeof(subport_params));
1068 subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;
1070 /* Update the subport configuration. */
1071 if (rte_sched_subport_config(SCHED(p),
1072 subport_id, &subport_params))
1075 /* Commit changes. */
1078 ss->shaper_profile_id = sp_new->shaper_profile_id;
1081 memcpy(&p->soft.tm.params.subport_params[subport_id],
1083 sizeof(subport_params));
1088 /* Traffic manager shared shaper add/update */
1090 pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
1091 uint32_t shared_shaper_id,
1092 uint32_t shaper_profile_id,
1093 struct rte_tm_error *error)
1095 struct pmd_internals *p = dev->data->dev_private;
1096 struct tm_shared_shaper *ss;
1097 struct tm_shaper_profile *sp;
1100 /* Shaper profile must be valid. */
1101 sp = tm_shaper_profile_search(dev, shaper_profile_id);
1103 return -rte_tm_error_set(error,
1105 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1107 rte_strerror(EINVAL));
1110 * Add new shared shaper
1112 ss = tm_shared_shaper_search(dev, shared_shaper_id);
1114 struct tm_shared_shaper_list *ssl =
1115 &p->soft.tm.h.shared_shapers;
1117 /* Hierarchy must not be frozen */
1118 if (p->soft.tm.hierarchy_frozen)
1119 return -rte_tm_error_set(error,
1121 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1123 rte_strerror(EBUSY));
1125 /* Memory allocation */
1126 ss = calloc(1, sizeof(struct tm_shared_shaper));
1128 return -rte_tm_error_set(error,
1130 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1132 rte_strerror(ENOMEM));
1135 ss->shared_shaper_id = shared_shaper_id;
1136 ss->shaper_profile_id = shaper_profile_id;
1139 TAILQ_INSERT_TAIL(ssl, ss, node);
1140 p->soft.tm.h.n_shared_shapers++;
1146 * Update existing shared shaper
1148 /* Hierarchy must be frozen (run-time update) */
1149 if (p->soft.tm.hierarchy_frozen == 0)
1150 return -rte_tm_error_set(error,
1152 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1154 rte_strerror(EBUSY));
1157 /* Propagate change. */
1158 nt = tm_shared_shaper_get_tc(dev, ss);
1159 if (update_subport_tc_rate(dev, nt, ss, sp))
1160 return -rte_tm_error_set(error,
1162 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1164 rte_strerror(EINVAL));
1169 /* Traffic manager shared shaper delete */
1171 pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
1172 uint32_t shared_shaper_id,
1173 struct rte_tm_error *error)
1175 struct pmd_internals *p = dev->data->dev_private;
1176 struct tm_shared_shaper *ss;
1178 /* Check existing */
1179 ss = tm_shared_shaper_search(dev, shared_shaper_id);
1181 return -rte_tm_error_set(error,
1183 RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
1185 rte_strerror(EINVAL));
1189 return -rte_tm_error_set(error,
1191 RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
1193 rte_strerror(EBUSY));
1195 /* Remove from list */
1196 TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
1197 p->soft.tm.h.n_shared_shapers--;
1204 wred_profile_check(struct rte_eth_dev *dev,
1205 uint32_t wred_profile_id,
1206 struct rte_tm_wred_params *profile,
1207 struct rte_tm_error *error)
1209 struct tm_wred_profile *wp;
1210 enum rte_color color;
1212 /* WRED profile ID must not be NONE. */
1213 if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
1214 return -rte_tm_error_set(error,
1216 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1218 rte_strerror(EINVAL));
1220 /* WRED profile must not exist. */
1221 wp = tm_wred_profile_search(dev, wred_profile_id);
1223 return -rte_tm_error_set(error,
1225 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1227 rte_strerror(EEXIST));
1229 /* Profile must not be NULL. */
1230 if (profile == NULL)
1231 return -rte_tm_error_set(error,
1233 RTE_TM_ERROR_TYPE_WRED_PROFILE,
1235 rte_strerror(EINVAL));
1237 /* WRED profile must be in packet mode */
1238 if (profile->packet_mode == 0)
1239 return -rte_tm_error_set(error,
1241 RTE_TM_ERROR_TYPE_WRED_PROFILE,
1243 rte_strerror(ENOTSUP));
1245 /* min_th <= max_th, max_th > 0, both thresholds must fit in 16 bits */
1246 for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
1247 uint32_t min_th = profile->red_params[color].min_th;
1248 uint32_t max_th = profile->red_params[color].max_th;
1250 if (min_th > max_th ||
1252 min_th > UINT16_MAX ||
1253 max_th > UINT16_MAX)
1254 return -rte_tm_error_set(error,
1256 RTE_TM_ERROR_TYPE_WRED_PROFILE,
1258 rte_strerror(EINVAL));
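/*
 * Illustrative sketch (not part of the driver): WRED thresholds accepted by
 * the checks above -- packet mode, min_th <= max_th, 16-bit thresholds. The
 * threshold, maxp_inv and wq_log2 values are examples only.
 *
 *	struct rte_tm_wred_params wp = {
 *		.red_params = {
 *			[RTE_COLOR_GREEN]  = { .min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9 },
 *			[RTE_COLOR_YELLOW] = { .min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9 },
 *			[RTE_COLOR_RED]    = { .min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9 },
 *		},
 *		.packet_mode = 1,
 *	};
 */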
1264 /* Traffic manager WRED profile add */
1266 pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
1267 uint32_t wred_profile_id,
1268 struct rte_tm_wred_params *profile,
1269 struct rte_tm_error *error)
1271 struct pmd_internals *p = dev->data->dev_private;
1272 struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
1273 struct tm_wred_profile *wp;
1276 /* Check input params */
1277 status = wred_profile_check(dev, wred_profile_id, profile, error);
1281 /* Memory allocation */
1282 wp = calloc(1, sizeof(struct tm_wred_profile));
1284 return -rte_tm_error_set(error,
1286 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1288 rte_strerror(ENOMEM));
1291 wp->wred_profile_id = wred_profile_id;
1292 memcpy(&wp->params, profile, sizeof(wp->params));
1295 TAILQ_INSERT_TAIL(wpl, wp, node);
1296 p->soft.tm.h.n_wred_profiles++;
1301 /* Traffic manager WRED profile delete */
1303 pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
1304 uint32_t wred_profile_id,
1305 struct rte_tm_error *error)
1307 struct pmd_internals *p = dev->data->dev_private;
1308 struct tm_wred_profile *wp;
1310 /* Check existing */
1311 wp = tm_wred_profile_search(dev, wred_profile_id);
1313 return -rte_tm_error_set(error,
1315 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1317 rte_strerror(EINVAL));
1321 return -rte_tm_error_set(error,
1323 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1325 rte_strerror(EBUSY));
1327 /* Remove from list */
1328 TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
1329 p->soft.tm.h.n_wred_profiles--;
1336 node_add_check_port(struct rte_eth_dev *dev,
1338 uint32_t parent_node_id __rte_unused,
1341 uint32_t level_id __rte_unused,
1342 struct rte_tm_node_params *params,
1343 struct rte_tm_error *error)
1345 struct pmd_internals *p = dev->data->dev_private;
1346 struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
1347 params->shaper_profile_id);
1349 /* node type: non-leaf */
1350 if (node_id < p->params.tm.n_queues)
1351 return -rte_tm_error_set(error,
1353 RTE_TM_ERROR_TYPE_NODE_ID,
1355 rte_strerror(EINVAL));
1357 /* Priority must be 0 */
1359 return -rte_tm_error_set(error,
1361 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1363 rte_strerror(EINVAL));
1365 /* Weight must be 1 */
1367 return -rte_tm_error_set(error,
1369 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1371 rte_strerror(EINVAL));
1373 /* Shaper must be valid */
1374 if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1376 return -rte_tm_error_set(error,
1378 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1380 rte_strerror(EINVAL));
1382 /* No shared shapers */
1383 if (params->n_shared_shapers != 0)
1384 return -rte_tm_error_set(error,
1386 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1388 rte_strerror(EINVAL));
1390 /* Number of SP priorities must be 1 */
1391 if (params->nonleaf.n_sp_priorities != 1)
1392 return -rte_tm_error_set(error,
1394 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1396 rte_strerror(EINVAL));
1399 if (params->stats_mask & ~STATS_MASK_DEFAULT)
1400 return -rte_tm_error_set(error,
1402 RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1404 rte_strerror(EINVAL));
1410 node_add_check_subport(struct rte_eth_dev *dev,
1412 uint32_t parent_node_id __rte_unused,
1415 uint32_t level_id __rte_unused,
1416 struct rte_tm_node_params *params,
1417 struct rte_tm_error *error)
1419 struct pmd_internals *p = dev->data->dev_private;
1421 /* node type: non-leaf */
1422 if (node_id < p->params.tm.n_queues)
1423 return -rte_tm_error_set(error,
1425 RTE_TM_ERROR_TYPE_NODE_ID,
1427 rte_strerror(EINVAL));
1429 /* Priority must be 0 */
1431 return -rte_tm_error_set(error,
1433 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1435 rte_strerror(EINVAL));
1437 /* Weight must be 1 */
1439 return -rte_tm_error_set(error,
1441 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1443 rte_strerror(EINVAL));
1445 /* Shaper must be valid */
1446 if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1447 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1448 return -rte_tm_error_set(error,
1450 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1452 rte_strerror(EINVAL));
1454 /* No shared shapers */
1455 if (params->n_shared_shapers != 0)
1456 return -rte_tm_error_set(error,
1458 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1460 rte_strerror(EINVAL));
1462 /* Number of SP priorities must be 1 */
1463 if (params->nonleaf.n_sp_priorities != 1)
1464 return -rte_tm_error_set(error,
1466 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1468 rte_strerror(EINVAL));
1471 if (params->stats_mask & ~STATS_MASK_DEFAULT)
1472 return -rte_tm_error_set(error,
1474 RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1476 rte_strerror(EINVAL));
1482 node_add_check_pipe(struct rte_eth_dev *dev,
1484 uint32_t parent_node_id __rte_unused,
1486 uint32_t weight __rte_unused,
1487 uint32_t level_id __rte_unused,
1488 struct rte_tm_node_params *params,
1489 struct rte_tm_error *error)
1491 struct pmd_internals *p = dev->data->dev_private;
1493 /* node type: non-leaf */
1494 if (node_id < p->params.tm.n_queues)
1495 return -rte_tm_error_set(error,
1497 RTE_TM_ERROR_TYPE_NODE_ID,
1499 rte_strerror(EINVAL));
1501 /* Priority must be 0 */
1503 return -rte_tm_error_set(error,
1505 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1507 rte_strerror(EINVAL));
1509 /* Shaper must be valid */
1510 if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1511 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1512 return -rte_tm_error_set(error,
1514 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1516 rte_strerror(EINVAL));
1518 /* No shared shapers */
1519 if (params->n_shared_shapers != 0)
1520 return -rte_tm_error_set(error,
1522 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1524 rte_strerror(EINVAL));
1526 /* Number of SP priorities must be RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE (13) */
1527 if (params->nonleaf.n_sp_priorities !=
1528 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1529 return -rte_tm_error_set(error,
1531 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1533 rte_strerror(EINVAL));
1535 /* WFQ mode must be byte mode */
1536 if (params->nonleaf.wfq_weight_mode != NULL &&
1537 params->nonleaf.wfq_weight_mode[0] != 0 &&
1538 params->nonleaf.wfq_weight_mode[1] != 0 &&
1539 params->nonleaf.wfq_weight_mode[2] != 0 &&
1540 params->nonleaf.wfq_weight_mode[3] != 0)
1541 return -rte_tm_error_set(error,
1543 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
1545 rte_strerror(EINVAL));
1548 if (params->stats_mask & ~STATS_MASK_DEFAULT)
1549 return -rte_tm_error_set(error,
1551 RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1553 rte_strerror(EINVAL));
1559 node_add_check_tc(struct rte_eth_dev *dev,
1561 uint32_t parent_node_id __rte_unused,
1562 uint32_t priority __rte_unused,
1564 uint32_t level_id __rte_unused,
1565 struct rte_tm_node_params *params,
1566 struct rte_tm_error *error)
1568 struct pmd_internals *p = dev->data->dev_private;
1570 /* node type: non-leaf */
1571 if (node_id < p->params.tm.n_queues)
1572 return -rte_tm_error_set(error,
1574 RTE_TM_ERROR_TYPE_NODE_ID,
1576 rte_strerror(EINVAL));
1578 /* Weight must be 1 */
1580 return -rte_tm_error_set(error,
1582 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1584 rte_strerror(EINVAL));
1586 /* Shaper must be valid */
1587 if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1588 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1589 return -rte_tm_error_set(error,
1591 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1593 rte_strerror(EINVAL));
1595 /* Single valid shared shaper */
1596 if (params->n_shared_shapers > 1)
1597 return -rte_tm_error_set(error,
1599 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1601 rte_strerror(EINVAL));
1603 if (params->n_shared_shapers == 1 &&
1604 (params->shared_shaper_id == NULL ||
1605 (!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
1606 return -rte_tm_error_set(error,
1608 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
1610 rte_strerror(EINVAL));
1612 /* Number of priorities must be 1 */
1613 if (params->nonleaf.n_sp_priorities != 1)
1614 return -rte_tm_error_set(error,
1616 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1618 rte_strerror(EINVAL));
1621 if (params->stats_mask & ~STATS_MASK_DEFAULT)
1622 return -rte_tm_error_set(error,
1624 RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1626 rte_strerror(EINVAL));
1632 node_add_check_queue(struct rte_eth_dev *dev,
1634 uint32_t parent_node_id __rte_unused,
1636 uint32_t weight __rte_unused,
1637 uint32_t level_id __rte_unused,
1638 struct rte_tm_node_params *params,
1639 struct rte_tm_error *error)
1641 struct pmd_internals *p = dev->data->dev_private;
1643 /* node type: leaf */
1644 if (node_id >= p->params.tm.n_queues)
1645 return -rte_tm_error_set(error,
1647 RTE_TM_ERROR_TYPE_NODE_ID,
1649 rte_strerror(EINVAL));
1651 /* Priority must be 0 */
1653 return -rte_tm_error_set(error,
1655 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1657 rte_strerror(EINVAL));
1660 if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1661 return -rte_tm_error_set(error,
1663 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1665 rte_strerror(EINVAL));
1667 /* No shared shapers */
1668 if (params->n_shared_shapers != 0)
1669 return -rte_tm_error_set(error,
1671 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1673 rte_strerror(EINVAL));
1675 /* Congestion management must not be head drop */
1676 if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
1677 return -rte_tm_error_set(error,
1679 RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
1681 rte_strerror(EINVAL));
1683 /* Congestion management set to WRED */
1684 if (params->leaf.cman == RTE_TM_CMAN_WRED) {
1685 uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
1686 struct tm_wred_profile *wp = tm_wred_profile_search(dev,
1689 /* WRED profile (for private WRED context) must be valid */
1690 if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
1692 return -rte_tm_error_set(error,
1694 RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
1696 rte_strerror(EINVAL));
1698 /* No shared WRED contexts */
1699 if (params->leaf.wred.n_shared_wred_contexts != 0)
1700 return -rte_tm_error_set(error,
1702 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
1704 rte_strerror(EINVAL));
1708 if (params->stats_mask & ~STATS_MASK_QUEUE)
1709 return -rte_tm_error_set(error,
1711 RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1713 rte_strerror(EINVAL));
1719 node_add_check(struct rte_eth_dev *dev,
1721 uint32_t parent_node_id,
1725 struct rte_tm_node_params *params,
1726 struct rte_tm_error *error)
1732 /* node_id, parent_node_id:
1733 * -node_id must not be RTE_TM_NODE_ID_NULL
1734 * -node_id must not be in use
1735 * -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1736 * -root node must not exist
1737 * -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1738 * -parent_node_id must be valid
1740 if (node_id == RTE_TM_NODE_ID_NULL)
1741 return -rte_tm_error_set(error,
1743 RTE_TM_ERROR_TYPE_NODE_ID,
1745 rte_strerror(EINVAL));
1747 if (tm_node_search(dev, node_id))
1748 return -rte_tm_error_set(error,
1750 RTE_TM_ERROR_TYPE_NODE_ID,
1752 rte_strerror(EEXIST));
1754 if (parent_node_id == RTE_TM_NODE_ID_NULL) {
1756 if (tm_root_node_present(dev))
1757 return -rte_tm_error_set(error,
1759 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1761 rte_strerror(EEXIST));
1763 pn = tm_node_search(dev, parent_node_id);
1765 return -rte_tm_error_set(error,
1767 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1769 rte_strerror(EINVAL));
1772 /* priority: must be 0 .. (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) */
1773 if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1774 return -rte_tm_error_set(error,
1776 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1778 rte_strerror(EINVAL));
1780 /* weight: must be non-zero and strictly less than UINT8_MAX (1 .. 254) */
1781 if (weight == 0 || weight >= UINT8_MAX)
1782 return -rte_tm_error_set(error,
1784 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1786 rte_strerror(EINVAL));
1788 /* level_id: if valid, then
1789 * -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1790 * -level_id must be zero
1791 * -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1792 * -level_id must be parent level ID plus one
1794 level = (pn == NULL) ? 0 : pn->level + 1;
1795 if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
1796 return -rte_tm_error_set(error,
1798 RTE_TM_ERROR_TYPE_LEVEL_ID,
1800 rte_strerror(EINVAL));
1802 /* params: must not be NULL */
1804 return -rte_tm_error_set(error,
1806 RTE_TM_ERROR_TYPE_NODE_PARAMS,
1808 rte_strerror(EINVAL));
1810 /* params: per level checks */
1812 case TM_NODE_LEVEL_PORT:
1813 status = node_add_check_port(dev, node_id,
1814 parent_node_id, priority, weight, level_id,
1820 case TM_NODE_LEVEL_SUBPORT:
1821 status = node_add_check_subport(dev, node_id,
1822 parent_node_id, priority, weight, level_id,
1828 case TM_NODE_LEVEL_PIPE:
1829 status = node_add_check_pipe(dev, node_id,
1830 parent_node_id, priority, weight, level_id,
1836 case TM_NODE_LEVEL_TC:
1837 status = node_add_check_tc(dev, node_id,
1838 parent_node_id, priority, weight, level_id,
1844 case TM_NODE_LEVEL_QUEUE:
1845 status = node_add_check_queue(dev, node_id,
1846 parent_node_id, priority, weight, level_id,
1853 return -rte_tm_error_set(error,
1855 RTE_TM_ERROR_TYPE_LEVEL_ID,
1857 rte_strerror(EINVAL));
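/*
 * Illustrative sketch (not part of the driver): adding the root (port level)
 * node through the rte_tm API in a way that satisfies node_add_check_port()
 * above. "port_id", "root_id", "port_shaper_id" and "err" are placeholder
 * names; root_id must be >= the number of queues so the node is non-leaf.
 *
 *	struct rte_tm_node_params np = {
 *		.shaper_profile_id = port_shaper_id,
 *		.nonleaf = { .n_sp_priorities = 1 },
 *		.stats_mask = STATS_MASK_DEFAULT,
 *	};
 *	rte_tm_node_add(port_id, root_id, RTE_TM_NODE_ID_NULL,
 *		0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
 */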
1863 /* Traffic manager node add */
1865 pmd_tm_node_add(struct rte_eth_dev *dev,
1867 uint32_t parent_node_id,
1871 struct rte_tm_node_params *params,
1872 struct rte_tm_error *error)
1874 struct pmd_internals *p = dev->data->dev_private;
1875 struct tm_node_list *nl = &p->soft.tm.h.nodes;
1881 if (p->soft.tm.hierarchy_frozen)
1882 return -rte_tm_error_set(error,
1884 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1886 rte_strerror(EBUSY));
1888 status = node_add_check(dev, node_id, parent_node_id, priority, weight,
1889 level_id, params, error);
1893 /* Memory allocation */
1894 n = calloc(1, sizeof(struct tm_node));
1896 return -rte_tm_error_set(error,
1898 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1900 rte_strerror(ENOMEM));
1903 n->node_id = node_id;
1904 n->parent_node_id = parent_node_id;
1905 n->priority = priority;
1908 if (parent_node_id != RTE_TM_NODE_ID_NULL) {
1909 n->parent_node = tm_node_search(dev, parent_node_id);
1910 n->level = n->parent_node->level + 1;
1913 if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1914 n->shaper_profile = tm_shaper_profile_search(dev,
1915 params->shaper_profile_id);
1917 if (n->level == TM_NODE_LEVEL_QUEUE &&
1918 params->leaf.cman == RTE_TM_CMAN_WRED)
1919 n->wred_profile = tm_wred_profile_search(dev,
1920 params->leaf.wred.wred_profile_id);
1922 memcpy(&n->params, params, sizeof(n->params));
1925 TAILQ_INSERT_TAIL(nl, n, node);
1926 p->soft.tm.h.n_nodes++;
1928 /* Update dependencies */
1930 n->parent_node->n_children++;
1932 if (n->shaper_profile)
1933 n->shaper_profile->n_users++;
1935 for (i = 0; i < params->n_shared_shapers; i++) {
1936 struct tm_shared_shaper *ss;
1938 ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
1942 if (n->wred_profile)
1943 n->wred_profile->n_users++;
1945 p->soft.tm.h.n_tm_nodes[n->level]++;
1950 /* Traffic manager node delete */
1952 pmd_tm_node_delete(struct rte_eth_dev *dev,
1954 struct rte_tm_error *error)
1956 struct pmd_internals *p = dev->data->dev_private;
1960 /* Check hierarchy changes are currently allowed */
1961 if (p->soft.tm.hierarchy_frozen)
1962 return -rte_tm_error_set(error,
1964 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1966 rte_strerror(EBUSY));
1968 /* Check existing */
1969 n = tm_node_search(dev, node_id);
1971 return -rte_tm_error_set(error,
1973 RTE_TM_ERROR_TYPE_NODE_ID,
1975 rte_strerror(EINVAL));
1979 return -rte_tm_error_set(error,
1981 RTE_TM_ERROR_TYPE_NODE_ID,
1983 rte_strerror(EBUSY));
1985 /* Update dependencies */
1986 p->soft.tm.h.n_tm_nodes[n->level]--;
1988 if (n->wred_profile)
1989 n->wred_profile->n_users--;
1991 for (i = 0; i < n->params.n_shared_shapers; i++) {
1992 struct tm_shared_shaper *ss;
1994 ss = tm_shared_shaper_search(dev,
1995 n->params.shared_shaper_id[i]);
1999 if (n->shaper_profile)
2000 n->shaper_profile->n_users--;
2003 n->parent_node->n_children--;
2005 /* Remove from list */
2006 TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
2007 p->soft.tm.h.n_nodes--;
2015 pipe_profile_build(struct rte_eth_dev *dev,
2017 struct rte_sched_pipe_params *pp)
2019 struct pmd_internals *p = dev->data->dev_private;
2020 struct tm_hierarchy *h = &p->soft.tm.h;
2021 struct tm_node_list *nl = &h->nodes;
2022 struct tm_node *nt, *nq;
2024 memset(pp, 0, sizeof(*pp));
2027 pp->tb_rate = np->shaper_profile->params.peak.rate;
2028 pp->tb_size = np->shaper_profile->params.peak.size;
2030 /* Traffic Class (TC) */
2031 pp->tc_period = PIPE_TC_PERIOD;
2033 pp->tc_ov_weight = np->weight;
2035 TAILQ_FOREACH(nt, nl, node) {
2036 uint32_t queue_id = 0;
2038 if (nt->level != TM_NODE_LEVEL_TC ||
2039 nt->parent_node_id != np->node_id)
2042 pp->tc_rate[nt->priority] =
2043 nt->shaper_profile->params.peak.rate;
2046 TAILQ_FOREACH(nq, nl, node) {
2048 if (nq->level != TM_NODE_LEVEL_QUEUE ||
2049 nq->parent_node_id != nt->node_id)
2052 if (nt->priority == RTE_SCHED_TRAFFIC_CLASS_BE)
2053 pp->wrr_weights[queue_id] = nq->weight;
2061 pipe_profile_free_exists(struct rte_eth_dev *dev,
2062 uint32_t *pipe_profile_id)
2064 struct pmd_internals *p = dev->data->dev_private;
2065 struct tm_params *t = &p->soft.tm.params;
2067 if (t->n_pipe_profiles < TM_MAX_PIPE_PROFILE) {
2068 *pipe_profile_id = t->n_pipe_profiles;
2076 pipe_profile_exists(struct rte_eth_dev *dev,
2077 struct rte_sched_pipe_params *pp,
2078 uint32_t *pipe_profile_id)
2080 struct pmd_internals *p = dev->data->dev_private;
2081 struct tm_params *t = &p->soft.tm.params;
2084 for (i = 0; i < t->n_pipe_profiles; i++)
2085 if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
2086 if (pipe_profile_id)
2087 *pipe_profile_id = i;
2095 pipe_profile_install(struct rte_eth_dev *dev,
2096 struct rte_sched_pipe_params *pp,
2097 uint32_t pipe_profile_id)
2099 struct pmd_internals *p = dev->data->dev_private;
2100 struct tm_params *t = &p->soft.tm.params;
2102 memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
2103 t->n_pipe_profiles++;
2107 pipe_profile_mark(struct rte_eth_dev *dev,
2108 uint32_t subport_id,
2110 uint32_t pipe_profile_id)
2112 struct pmd_internals *p = dev->data->dev_private;
2113 struct tm_hierarchy *h = &p->soft.tm.h;
2114 struct tm_params *t = &p->soft.tm.params;
2115 uint32_t n_pipes_per_subport, pos;
2117 n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2118 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2119 pos = subport_id * n_pipes_per_subport + pipe_id;
2121 t->pipe_to_profile[pos] = pipe_profile_id;
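/* Remember which pipe profile each (subport, pipe) pair uses; this mapping is
 * looked up by pipe_profile_get() and by the rte_sched_pipe_config() calls at
 * tmgr port creation time.
 */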
2124 static struct rte_sched_pipe_params *
2125 pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
2127 struct pmd_internals *p = dev->data->dev_private;
2128 struct tm_hierarchy *h = &p->soft.tm.h;
2129 struct tm_params *t = &p->soft.tm.params;
2130 uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2131 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2133 uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
2134 uint32_t pipe_id = tm_node_pipe_id(dev, np);
2136 uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
2137 uint32_t pipe_profile_id = t->pipe_to_profile[pos];
2139 return &t->pipe_profiles[pipe_profile_id];
2143 pipe_profiles_generate(struct rte_eth_dev *dev)
2145 struct pmd_internals *p = dev->data->dev_private;
2146 struct tm_hierarchy *h = &p->soft.tm.h;
2147 struct tm_node_list *nl = &h->nodes;
2148 struct tm_node *ns, *np;
2149 uint32_t subport_id;
2151 /* Objective: Fill in the following fields in struct tm_params:
2158 TAILQ_FOREACH(ns, nl, node) {
2161 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2165 TAILQ_FOREACH(np, nl, node) {
2166 struct rte_sched_pipe_params pp;
2169 if (np->level != TM_NODE_LEVEL_PIPE ||
2170 np->parent_node_id != ns->node_id)
2173 pipe_profile_build(dev, np, &pp);
2175 if (!pipe_profile_exists(dev, &pp, &pos)) {
2176 if (!pipe_profile_free_exists(dev, &pos))
2179 pipe_profile_install(dev, &pp, pos);
2182 pipe_profile_mark(dev, subport_id, pipe_id, pos);
2193 static struct tm_wred_profile *
2194 tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
2196 struct pmd_internals *p = dev->data->dev_private;
2197 struct tm_hierarchy *h = &p->soft.tm.h;
2198 struct tm_node_list *nl = &h->nodes;
2201 TAILQ_FOREACH(nq, nl, node) {
2202 if (nq->level != TM_NODE_LEVEL_QUEUE ||
2203 nq->parent_node->priority != tc_id)
2206 return nq->wred_profile;
2212 #ifdef RTE_SCHED_RED
2215 wred_profiles_set(struct rte_eth_dev *dev, uint32_t subport_id)
2217 struct pmd_internals *p = dev->data->dev_private;
2218 struct rte_sched_subport_params *pp =
2219 &p->soft.tm.params.subport_params[subport_id];
2222 enum rte_color color;
2224 for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
2225 for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
2226 struct rte_red_params *dst =
2227 &pp->red_params[tc_id][color];
2228 struct tm_wred_profile *src_wp =
2229 tm_tc_wred_profile_get(dev, tc_id);
2230 struct rte_tm_red_params *src =
2231 &src_wp->params.red_params[color];
2233 memcpy(dst, src, sizeof(*dst));
2239 #define wred_profiles_set(dev, subport_id)
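/* WRED support is compiled out: subport RED parameters are left untouched. */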
2243 static struct tm_shared_shaper *
2244 tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
2246 return (tc_node->params.n_shared_shapers) ?
2247 tm_shared_shaper_search(dev,
2248 tc_node->params.shared_shaper_id[0]) :
2252 static struct tm_shared_shaper *
2253 tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
2254 struct tm_node *subport_node,
2257 struct pmd_internals *p = dev->data->dev_private;
2258 struct tm_node_list *nl = &p->soft.tm.h.nodes;
2261 TAILQ_FOREACH(n, nl, node) {
2262 if (n->level != TM_NODE_LEVEL_TC ||
2263 n->parent_node->parent_node_id !=
2264 subport_node->node_id ||
2265 n->priority != tc_id)
2268 return tm_tc_shared_shaper_get(dev, n);
2275 hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
2277 struct pmd_internals *p = dev->data->dev_private;
2278 struct tm_hierarchy *h = &p->soft.tm.h;
2279 struct tm_node_list *nl = &h->nodes;
2280 struct tm_shared_shaper_list *ssl = &h->shared_shapers;
2281 struct tm_wred_profile_list *wpl = &h->wred_profiles;
2282 struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
2283 struct tm_shared_shaper *ss;
2285 uint32_t n_pipes_per_subport;
2287 /* Root node exists. */
2289 return -rte_tm_error_set(error,
2291 RTE_TM_ERROR_TYPE_LEVEL_ID,
2293 rte_strerror(EINVAL));
2295 /* There is at least one subport, max is not exceeded. */
2296 if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
2297 return -rte_tm_error_set(error,
2299 RTE_TM_ERROR_TYPE_LEVEL_ID,
2301 rte_strerror(EINVAL));
2303 /* There is at least one pipe. */
2304 if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
2305 return -rte_tm_error_set(error,
2307 RTE_TM_ERROR_TYPE_LEVEL_ID,
2309 rte_strerror(EINVAL));
2311 /* Number of pipes is the same for all subports. Maximum number of pipes
2312 * per subport is not exceeded.
2314 n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2315 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2317 if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
2318 return -rte_tm_error_set(error,
2320 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2322 rte_strerror(EINVAL));
2324 TAILQ_FOREACH(ns, nl, node) {
2325 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2328 if (ns->n_children != n_pipes_per_subport)
2329 return -rte_tm_error_set(error,
2331 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2333 rte_strerror(EINVAL));
2336 /* Each pipe has exactly 13 TCs, with exactly one TC for each priority */
2337 TAILQ_FOREACH(np, nl, node) {
2338 uint32_t mask = 0, mask_expected =
2339 RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
2342 if (np->level != TM_NODE_LEVEL_PIPE)
2345 if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
2346 return -rte_tm_error_set(error,
2348 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2350 rte_strerror(EINVAL));
2352 TAILQ_FOREACH(nt, nl, node) {
2353 if (nt->level != TM_NODE_LEVEL_TC ||
2354 nt->parent_node_id != np->node_id)
2357 mask |= 1 << nt->priority;
2360 if (mask != mask_expected)
2361 return -rte_tm_error_set(error,
2363 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2365 rte_strerror(EINVAL));
2368 /* Each strict priority TC has exactly 1 packet queue, while the lowest
2369 * priority TC (Best Effort) has RTE_SCHED_BE_QUEUES_PER_PIPE (4) queues.
2371 TAILQ_FOREACH(nt, nl, node) {
2372 if (nt->level != TM_NODE_LEVEL_TC)
2375 if (nt->n_children != 1 && nt->n_children != RTE_SCHED_BE_QUEUES_PER_PIPE)
2376 return -rte_tm_error_set(error,
2378 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2380 rte_strerror(EINVAL));
2385 * -For each TC #i, all pipes in the same subport use the same
2386 * shared shaper (or no shared shaper) for their TC#i.
2387 * -Each shared shaper needs to have at least one user. All its
2388 * users have to be TC nodes with the same priority and the same
2391 TAILQ_FOREACH(ns, nl, node) {
2392 struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2395 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2398 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
2399 s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);
2401 TAILQ_FOREACH(nt, nl, node) {
2402 struct tm_shared_shaper *subport_ss, *tc_ss;
2404 if (nt->level != TM_NODE_LEVEL_TC ||
2405 nt->parent_node->parent_node_id !=
2409 subport_ss = s[nt->priority];
2410 tc_ss = tm_tc_shared_shaper_get(dev, nt);
2412 if (subport_ss == NULL && tc_ss == NULL)
2415 if ((subport_ss == NULL && tc_ss != NULL) ||
2416 (subport_ss != NULL && tc_ss == NULL) ||
2417 subport_ss->shared_shaper_id !=
2418 tc_ss->shared_shaper_id)
2419 return -rte_tm_error_set(error,
2421 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2423 rte_strerror(EINVAL));
2427 TAILQ_FOREACH(ss, ssl, node) {
2428 struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
2429 uint32_t n_users = 0;
2432 TAILQ_FOREACH(nt, nl, node) {
2433 if (nt->level != TM_NODE_LEVEL_TC ||
2434 nt->priority != nt_any->priority ||
2435 nt->parent_node->parent_node_id !=
2436 nt_any->parent_node->parent_node_id)
2442 if (ss->n_users == 0 || ss->n_users != n_users)
2443 return -rte_tm_error_set(error,
2445 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2447 rte_strerror(EINVAL));
2450 /* Not too many pipe profiles. */
2451 if (pipe_profiles_generate(dev))
2452 return -rte_tm_error_set(error,
2454 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2456 rte_strerror(EINVAL));
2459 * WRED (when used, i.e. at least one WRED profile defined):
2460 * -Each WRED profile must have at least one user.
2461 * -All leaf nodes must have their private WRED context enabled.
2462 * -For each TC #i, all leaf nodes must use the same WRED profile
2463 * for their private WRED context.
2465 if (h->n_wred_profiles) {
2466 struct tm_wred_profile *wp;
2467 struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2470 TAILQ_FOREACH(wp, wpl, node)
2471 if (wp->n_users == 0)
2472 return -rte_tm_error_set(error,
2474 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2476 rte_strerror(EINVAL));
2478 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
2479 w[id] = tm_tc_wred_profile_get(dev, id);
2482 return -rte_tm_error_set(error,
2484 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2486 rte_strerror(EINVAL));
2489 TAILQ_FOREACH(nq, nl, node) {
2492 if (nq->level != TM_NODE_LEVEL_QUEUE)
2495 id = nq->parent_node->priority;
2497 if (nq->wred_profile == NULL ||
2498 nq->wred_profile->wred_profile_id !=
2499 w[id]->wred_profile_id)
2500 return -rte_tm_error_set(error,
2502 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2504 rte_strerror(EINVAL));
2512 hierarchy_blueprints_create(struct rte_eth_dev *dev)
2514 struct pmd_internals *p = dev->data->dev_private;
2515 struct tm_params *t = &p->soft.tm.params;
2516 struct tm_hierarchy *h = &p->soft.tm.h;
2518 struct tm_node_list *nl = &h->nodes;
2519 struct tm_node *root = tm_root_node_present(dev), *n;
2521 uint32_t subport_id;
2523 t->port_params = (struct rte_sched_port_params) {
2524 .name = dev->data->name,
2525 .socket = dev->data->numa_node,
2526 .rate = root->shaper_profile->params.peak.rate,
2527 .mtu = dev->data->mtu,
2529 root->shaper_profile->params.pkt_length_adjust,
2530 .n_subports_per_port = root->n_children,
2531 .n_pipes_per_subport = TM_MAX_PIPES_PER_SUBPORT,
	TAILQ_FOREACH(n, nl, node) {
		uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
		uint32_t i;

		if (n->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
			struct tm_shared_shaper *ss;
			struct tm_shaper_profile *sp;

			ss = tm_subport_tc_shared_shaper_get(dev, n, i);
			sp = (ss) ? tm_shaper_profile_search(dev,
				ss->shaper_profile_id) :
				n->shaper_profile;
			tc_rate[i] = sp->params.peak.rate;
		}

		t->subport_params[subport_id] =
			(struct rte_sched_subport_params) {
				.tb_rate = n->shaper_profile->params.peak.rate,
				.tb_size = n->shaper_profile->params.peak.size,

				.tc_rate = {tc_rate[0],
					tc_rate[1],
					tc_rate[2],
					tc_rate[3],
					tc_rate[4],
					tc_rate[5],
					tc_rate[6],
					tc_rate[7],
					tc_rate[8],
					tc_rate[9],
					tc_rate[10],
					tc_rate[11],
					tc_rate[12],
				},
				.tc_period = SUBPORT_TC_PERIOD,
				.n_pipes_per_subport_enabled =
					h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
					h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
				.qsize = {p->params.tm.qsize[0],
					p->params.tm.qsize[1],
					p->params.tm.qsize[2],
					p->params.tm.qsize[3],
					p->params.tm.qsize[4],
					p->params.tm.qsize[5],
					p->params.tm.qsize[6],
					p->params.tm.qsize[7],
					p->params.tm.qsize[8],
					p->params.tm.qsize[9],
					p->params.tm.qsize[10],
					p->params.tm.qsize[11],
					p->params.tm.qsize[12],
				},
				.pipe_profiles = t->pipe_profiles,
				.n_pipe_profiles = t->n_pipe_profiles,
				.n_max_pipe_profiles = TM_MAX_PIPE_PROFILE,
		};

		wred_profiles_set(dev, subport_id);
		subport_id++;
	}
}
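/*
 * Worked example (illustrative only): with a committed hierarchy of 2 subport
 * nodes and 8192 pipe nodes, each subport blueprint above gets
 * .n_pipes_per_subport_enabled = 8192 / 2 = 4096, while the port blueprint
 * still advertises TM_MAX_PIPES_PER_SUBPORT as the upper bound.
 */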
/* Traffic manager hierarchy commit */
static int
pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
	int clear_on_fail,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	int status;

	/* Checks */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	status = hierarchy_commit_check(dev, error);
	if (status) {
		if (clear_on_fail)
			tm_hierarchy_free(p);

		return status;
	}

	/* Create blueprints */
	hierarchy_blueprints_create(dev);

	/* Freeze hierarchy */
	p->soft.tm.hierarchy_frozen = 1;

	return 0;
}
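/*
 * Illustrative usage sketch (not part of the driver): once the application has
 * added its shaper profiles and nodes through the rte_tm API, it commits the
 * hierarchy, which lands in pmd_tm_hierarchy_commit() above. The port_id value
 * and the decision to clear on failure are assumptions made for the example;
 * the sketch assumes the <rte_tm.h> declarations are visible.
 */
static int __rte_unused
example_commit_hierarchy(uint16_t port_id)
{
	struct rte_tm_error error;

	/* Ask the driver to free the half-built hierarchy on failure. */
	int clear_on_fail = 1;

	/* Validates the hierarchy, builds the blueprints and freezes it. */
	return rte_tm_hierarchy_commit(port_id, clear_on_fail, &error);
}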
#ifdef RTE_SCHED_SUBPORT_TC_OV

static int
update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tc_ov_weight = (uint8_t)weight;

	/* Since implementation does not allow adding more pipe profiles after
	 * port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	np->weight = weight;

	return 0;
}

#endif
static int
update_queue_weight(struct rte_eth_dev *dev,
	struct tm_node *nq, uint32_t weight)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t queue_id = tm_node_queue_id(dev, nq);

	struct tm_node *nt = nq->parent_node;

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	uint32_t pipe_be_queue_id =
		queue_id - RTE_SCHED_TRAFFIC_CLASS_BE;

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.wrr_weights[pipe_be_queue_id] = (uint8_t)weight;

	/* Since implementation does not allow adding more pipe profiles after
	 * port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set
	 * of pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	nq->weight = weight;

	return 0;
}
/* Traffic manager node parent update */
static int
pmd_tm_node_parent_update(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	struct rte_tm_error *error)
{
	struct tm_node *n;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Parent node must be the same */
	if (n->parent_node_id != parent_node_id)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be the same */
	if (n->priority != priority)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* weight: must be 1 .. 255 */
	if (weight == 0 || weight >= UINT8_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_SUBPORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_PIPE:
#ifdef RTE_SCHED_SUBPORT_TC_OV
		if (update_pipe_weight(dev, n, weight))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
#else
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
#endif
		/* fall-through */
	case TM_NODE_LEVEL_TC:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_QUEUE:
		/* fall-through */
	default:
		if (update_queue_weight(dev, n, weight))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	}
}
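/*
 * Illustrative usage sketch (not part of the driver): with this PMD, only the
 * weight can effectively change through the parent-update call; the parent id
 * and priority passed in must match the node's current values, as checked
 * above. The weight value below is an example only; the sketch assumes the
 * <rte_tm.h> declarations are visible.
 */
static int __rte_unused
example_update_queue_weight(uint16_t port_id, uint32_t queue_node_id,
	uint32_t current_parent_id, uint32_t current_priority)
{
	struct rte_tm_error error;

	/* Re-state the existing parent and priority, change only the weight. */
	return rte_tm_node_parent_update(port_id, queue_node_id,
		current_parent_id, current_priority, 32 /* new WRR weight */,
		&error);
}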
static int
update_subport_rate(struct rte_eth_dev *dev,
	struct tm_node *ns,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_subport_params subport_params;

	/* Derive new subport configuration. */
	memcpy(&subport_params,
		&p->soft.tm.params.subport_params[subport_id],
		sizeof(subport_params));
	subport_params.tb_rate = sp->params.peak.rate;
	subport_params.tb_size = sp->params.peak.size;

	/* Update the subport configuration. */
	if (rte_sched_subport_config(SCHED(p), subport_id,
		&subport_params))
		return -1;

	/* Commit changes. */
	ns->shaper_profile->n_users--;

	ns->shaper_profile = sp;
	ns->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	memcpy(&p->soft.tm.params.subport_params[subport_id],
		&subport_params,
		sizeof(subport_params));

	return 0;
}
static int
update_pipe_rate(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tb_rate = sp->params.peak.rate;
	profile1.tb_size = sp->params.peak.size;

	/* Since implementation does not allow adding more pipe profiles after
	 * port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	np->shaper_profile->n_users--;
	np->shaper_profile = sp;
	np->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	return 0;
}
static int
update_tc_rate(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tc_rate[tc_id] = sp->params.peak.rate;

	/* Since implementation does not allow adding more pipe profiles after
	 * port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	nt->shaper_profile->n_users--;
	nt->shaper_profile = sp;
	nt->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	return 0;
}
/* Traffic manager node shaper update */
static int
pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct tm_node *n;
	struct tm_shaper_profile *sp;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper profile must be valid. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_SUBPORT:
		if (update_subport_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_PIPE:
		if (update_pipe_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_TC:
		if (update_tc_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_QUEUE:
		/* fall-through */
	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));
	}
}
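/*
 * Illustrative usage sketch (not part of the driver): changing a pipe node's
 * rate from the application side means adding a new shaper profile and then
 * pointing the node at it. As noted in update_pipe_rate() above, the call only
 * succeeds if the resulting pipe parameters match one of the pipe profiles
 * generated at hierarchy commit time. The identifiers and rates below are
 * example values; the sketch assumes the <rte_tm.h> declarations are visible.
 */
static int __rte_unused
example_update_pipe_rate(uint16_t port_id, uint32_t pipe_node_id)
{
	struct rte_tm_shaper_params params = {
		.peak = {.rate = 10000000, .size = 1000000}, /* bytes/s, bytes */
		.pkt_length_adjust = 24, /* example framing overhead */
	};
	struct rte_tm_error error;
	uint32_t profile_id = 100; /* example profile id */
	int status;

	status = rte_tm_shaper_profile_add(port_id, profile_id, &params, &error);
	if (status)
		return status;

	return rte_tm_node_shaper_update(port_id, pipe_node_id, profile_id,
		&error);
}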
static inline uint32_t
tm_port_queue_id(struct rte_eth_dev *dev,
	uint32_t port_subport_id,
	uint32_t subport_pipe_id,
	uint32_t pipe_tc_id,
	uint32_t tc_queue_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

	uint32_t port_pipe_id =
		port_subport_id * n_pipes_per_subport + subport_pipe_id;

	uint32_t port_queue_id =
		port_pipe_id * RTE_SCHED_QUEUES_PER_PIPE + pipe_tc_id + tc_queue_id;

	return port_queue_id;
}
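/*
 * Worked example (illustrative, assuming 4096 enabled pipes per subport and
 * RTE_SCHED_QUEUES_PER_PIPE = 16): for subport 1, pipe 7, the best-effort TC
 * (pipe_tc_id = RTE_SCHED_TRAFFIC_CLASS_BE = 12) and its queue 3:
 *    port_pipe_id  = 1 * 4096 + 7 = 4103
 *    port_queue_id = 4103 * 16 + 12 + 3 = 65663
 * Strict-priority TCs own a single queue, so they pass tc_queue_id = 0.
 */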
static int
read_port_stats(struct rte_eth_dev *dev,
	struct tm_node *nr,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
	uint32_t subport_id;

	for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
		struct rte_sched_subport_stats s;
		uint32_t tc_ov, id;

		/* Stats read */
		int status = rte_sched_subport_read_stats(SCHED(p),
			subport_id, &s, &tc_ov);
		if (status)
			return status;

		/* Stats accumulate */
		for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
			nr->stats.n_pkts +=
				s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
			nr->stats.n_bytes +=
				s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
			nr->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
				s.n_pkts_tc_dropped[id];
			nr->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
				s.n_bytes_tc_dropped[id];
		}
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &nr->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&nr->stats, 0, sizeof(nr->stats));

	return 0;
}
static int
read_subport_stats(struct rte_eth_dev *dev,
	struct tm_node *ns,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	struct rte_sched_subport_stats s;
	uint32_t tc_ov, tc_id;

	/* Stats read */
	int status = rte_sched_subport_read_stats(SCHED(p),
		subport_id, &s, &tc_ov);
	if (status)
		return status;

	/* Stats accumulate */
	for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
		ns->stats.n_pkts +=
			s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
		ns->stats.n_bytes +=
			s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
		ns->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
			s.n_pkts_tc_dropped[tc_id];
		ns->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
			s.n_bytes_tc_dropped[tc_id];
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &ns->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&ns->stats, 0, sizeof(ns->stats));

	return 0;
}
static int
read_pipe_stats(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	uint32_t tc_id, queue_id;
	uint32_t i;

	/* Stats read */
	for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
		struct rte_sched_queue_stats s;
		uint16_t qlen;

		if (i < RTE_SCHED_TRAFFIC_CLASS_BE) {
			tc_id = i;
			queue_id = 0;
		} else {
			tc_id = RTE_SCHED_TRAFFIC_CLASS_BE;
			queue_id = i - tc_id;
		}

		uint32_t qid = tm_port_queue_id(dev,
			subport_id, pipe_id, tc_id, queue_id);

		int status = rte_sched_queue_read_stats(SCHED(p),
			qid, &s, &qlen);
		if (status)
			return status;

		/* Stats accumulate */
		np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
		np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
		np->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
		np->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
			s.n_bytes_dropped;
		np->stats.leaf.n_pkts_queued = qlen;
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &np->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&np->stats, 0, sizeof(np->stats));

	return 0;
}
static int
read_tc_stats(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	struct rte_sched_queue_stats s;
	uint32_t qid, i;
	uint16_t qlen;
	int status;

	/* Stats read */
	if (tc_id < RTE_SCHED_TRAFFIC_CLASS_BE) {
		qid = tm_port_queue_id(dev,
			subport_id, pipe_id, tc_id, 0);

		status = rte_sched_queue_read_stats(SCHED(p),
			qid, &s, &qlen);
		if (status)
			return status;

		/* Stats accumulate */
		nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
		nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
		nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
		nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
			s.n_bytes_dropped;
		nt->stats.leaf.n_pkts_queued = qlen;
	} else {
		for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
			qid = tm_port_queue_id(dev,
				subport_id, pipe_id, tc_id, i);

			status = rte_sched_queue_read_stats(SCHED(p),
				qid, &s, &qlen);
			if (status)
				return status;

			/* Stats accumulate */
			nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
			nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
			nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
				s.n_pkts_dropped;
			nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
				s.n_bytes_dropped;
			nt->stats.leaf.n_pkts_queued = qlen;
		}
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &nt->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&nt->stats, 0, sizeof(nt->stats));

	return 0;
}
static int
read_queue_stats(struct rte_eth_dev *dev,
	struct tm_node *nq,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct rte_sched_queue_stats s;
	uint16_t qlen;

	uint32_t queue_id = tm_node_queue_id(dev, nq);

	struct tm_node *nt = nq->parent_node;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	/* Stats read */
	uint32_t qid = tm_port_queue_id(dev,
		subport_id, pipe_id, tc_id, queue_id);

	int status = rte_sched_queue_read_stats(SCHED(p),
		qid, &s, &qlen);
	if (status)
		return status;

	/* Stats accumulate */
	nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
	nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
	nq->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
	nq->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
		s.n_bytes_dropped;
	nq->stats.leaf.n_pkts_queued = qlen;

	/* Stats copy */
	if (stats)
		memcpy(stats, &nq->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_QUEUE;

	/* Stats clear */
	if (clear)
		memset(&nq->stats, 0, sizeof(nq->stats));

	return 0;
}
/* Traffic manager read stats counters for specific node */
static int
pmd_tm_node_stats_read(struct rte_eth_dev *dev,
	uint32_t node_id,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear,
	struct rte_tm_error *error)
{
	struct tm_node *n;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		if (read_port_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_SUBPORT:
		if (read_subport_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_PIPE:
		if (read_pipe_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_TC:
		if (read_tc_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_QUEUE:
	default:
		if (read_queue_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	}
}
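/*
 * Illustrative usage sketch (not part of the driver): the application reads a
 * node's counters through the generic rte_tm API, which dispatches to
 * pmd_tm_node_stats_read() above. The decision to clear after reading is an
 * example choice; the sketch assumes the <rte_tm.h> declarations are visible
 * and that the hierarchy has been committed.
 */
static int __rte_unused
example_read_node_stats(uint16_t port_id, uint32_t node_id)
{
	struct rte_tm_node_stats node_stats;
	struct rte_tm_error error;
	uint64_t stats_mask = 0;
	int clear = 1; /* reset the driver-side counters after reading */

	return rte_tm_node_stats_read(port_id, node_id, &node_stats,
		&stats_mask, clear, &error);
}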
const struct rte_tm_ops pmd_tm_ops = {
	.node_type_get = pmd_tm_node_type_get,
	.capabilities_get = pmd_tm_capabilities_get,
	.level_capabilities_get = pmd_tm_level_capabilities_get,
	.node_capabilities_get = pmd_tm_node_capabilities_get,

	.wred_profile_add = pmd_tm_wred_profile_add,
	.wred_profile_delete = pmd_tm_wred_profile_delete,
	.shared_wred_context_add_update = NULL,
	.shared_wred_context_delete = NULL,

	.shaper_profile_add = pmd_tm_shaper_profile_add,
	.shaper_profile_delete = pmd_tm_shaper_profile_delete,
	.shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
	.shared_shaper_delete = pmd_tm_shared_shaper_delete,

	.node_add = pmd_tm_node_add,
	.node_delete = pmd_tm_node_delete,
	.node_suspend = NULL,
	.node_resume = NULL,
	.hierarchy_commit = pmd_tm_hierarchy_commit,

	.node_parent_update = pmd_tm_node_parent_update,
	.node_shaper_update = pmd_tm_node_shaper_update,
	.node_shared_shaper_update = NULL,
	.node_stats_update = NULL,
	.node_wfq_weight_mode_update = NULL,
	.node_cman_update = NULL,
	.node_wred_context_update = NULL,
	.node_shared_wred_context_update = NULL,

	.node_stats_read = pmd_tm_node_stats_read,
};
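/*
 * Note (illustrative, not part of this file): the softnic ethdev is expected
 * to expose this table through its eth_dev_ops tm_ops_get callback, roughly as
 * sketched below, so that the generic rte_tm_* API calls reach the handlers
 * above. The callback name is an assumption made for the example.
 */
#if 0
static int
example_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
{
	*(const struct rte_tm_ops **)arg = &pmd_tm_ops;

	return 0;
}
#endif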