/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_malloc.h>

#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"
#define BYTES_IN_MBPS		(1000 * 1000 / 8)
#define SUBPORT_TC_PERIOD	10
#define PIPE_TC_PERIOD		40
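
/*
 * Illustrative arithmetic (sketch, not driver code): BYTES_IN_MBPS is
 * 125000, so a 10000 Mbps (10 Gbps) hard_rate converts to
 * 10000 * 125000 = 1250000000 bytes/sec, which still fits in 32 bits.
 */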
int
tm_params_check(struct pmd_params *params, uint32_t hard_rate)
{
	uint64_t hard_rate_bytes_per_sec = (uint64_t)hard_rate * BYTES_IN_MBPS;
	uint32_t i;

	/* rate */
	if (params->soft.tm.rate) {
		if (params->soft.tm.rate > hard_rate_bytes_per_sec)
			return -EINVAL;
	} else {
		params->soft.tm.rate =
			(hard_rate_bytes_per_sec > UINT32_MAX) ?
				UINT32_MAX : hard_rate_bytes_per_sec;
	}

	/* nb_queues */
	if (params->soft.tm.nb_queues == 0)
		return -EINVAL;

	if (params->soft.tm.nb_queues < RTE_SCHED_QUEUES_PER_PIPE)
		params->soft.tm.nb_queues = RTE_SCHED_QUEUES_PER_PIPE;

	params->soft.tm.nb_queues =
		rte_align32pow2(params->soft.tm.nb_queues);

	/* qsize */
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		if (params->soft.tm.qsize[i] == 0)
			return -EINVAL;

		params->soft.tm.qsize[i] =
			rte_align32pow2(params->soft.tm.qsize[i]);
	}

	/* enq_bsz, deq_bsz */
	if (params->soft.tm.enq_bsz == 0 ||
		params->soft.tm.deq_bsz == 0 ||
		params->soft.tm.deq_bsz >= params->soft.tm.enq_bsz)
		return -EINVAL;

	return 0;
}
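
/*
 * Example (sketch, assuming the default rte_sched build of 4 traffic
 * classes x 4 queues per pipe, i.e. RTE_SCHED_QUEUES_PER_PIPE = 16):
 * nb_queues = 24 is kept (already >= 16) and then rounded up to the next
 * power of two, giving 32; nb_queues = 10 is first raised to 16, which is
 * already a power of two and stays 16.
 */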
static void
tm_hierarchy_init(struct pmd_internals *p)
{
	memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));

	/* Initialize shaper profile list */
	TAILQ_INIT(&p->soft.tm.h.shaper_profiles);

	/* Initialize shared shaper list */
	TAILQ_INIT(&p->soft.tm.h.shared_shapers);

	/* Initialize wred profile list */
	TAILQ_INIT(&p->soft.tm.h.wred_profiles);

	/* Initialize TM node list */
	TAILQ_INIT(&p->soft.tm.h.nodes);
}
static void
tm_hierarchy_uninit(struct pmd_internals *p)
{
	/* Remove all nodes */
	for ( ; ; ) {
		struct tm_node *tm_node;

		tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
		if (tm_node == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
		free(tm_node);
	}

	/* Remove all WRED profiles */
	for ( ; ; ) {
		struct tm_wred_profile *wred_profile;

		wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
		if (wred_profile == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
		free(wred_profile);
	}

	/* Remove all shared shapers */
	for ( ; ; ) {
		struct tm_shared_shaper *shared_shaper;

		shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
		if (shared_shaper == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
		free(shared_shaper);
	}

	/* Remove all shaper profiles */
	for ( ; ; ) {
		struct tm_shaper_profile *shaper_profile;

		shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
		if (shaper_profile == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
			shaper_profile, node);
		free(shaper_profile);
	}

	memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
}
int
tm_init(struct pmd_internals *p,
	struct pmd_params *params,
	int numa_node)
{
	uint32_t enq_bsz = params->soft.tm.enq_bsz;
	uint32_t deq_bsz = params->soft.tm.deq_bsz;

	p->soft.tm.pkts_enq = rte_zmalloc_socket(params->soft.name,
		2 * enq_bsz * sizeof(struct rte_mbuf *),
		0,
		numa_node);

	if (p->soft.tm.pkts_enq == NULL)
		return -ENOMEM;

	p->soft.tm.pkts_deq = rte_zmalloc_socket(params->soft.name,
		deq_bsz * sizeof(struct rte_mbuf *),
		0,
		numa_node);

	if (p->soft.tm.pkts_deq == NULL) {
		rte_free(p->soft.tm.pkts_enq);
		return -ENOMEM;
	}

	tm_hierarchy_init(p);

	return 0;
}

void
tm_free(struct pmd_internals *p)
{
	tm_hierarchy_uninit(p);
	rte_free(p->soft.tm.pkts_enq);
	rte_free(p->soft.tm.pkts_deq);
}
int
tm_start(struct pmd_internals *p)
{
	struct tm_params *t = &p->soft.tm.params;
	uint32_t n_subports, subport_id;

	/* Is hierarchy frozen? */
	if (p->soft.tm.hierarchy_frozen == 0)
		return -1;

	/* Port */
	p->soft.tm.sched = rte_sched_port_config(&t->port_params);
	if (p->soft.tm.sched == NULL)
		return -1;

	/* Subport */
	n_subports = t->port_params.n_subports_per_port;
	for (subport_id = 0; subport_id < n_subports; subport_id++) {
		uint32_t n_pipes_per_subport =
			t->port_params.n_pipes_per_subport;
		uint32_t pipe_id;
		int status;

		status = rte_sched_subport_config(p->soft.tm.sched,
			subport_id,
			&t->subport_params[subport_id]);
		if (status) {
			rte_sched_port_free(p->soft.tm.sched);
			return -1;
		}

		/* Pipe */
		n_pipes_per_subport = t->port_params.n_pipes_per_subport;
		for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
			int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
				pipe_id;
			int profile_id = t->pipe_to_profile[pos];

			if (profile_id < 0)
				continue;

			status = rte_sched_pipe_config(p->soft.tm.sched,
				subport_id,
				pipe_id,
				profile_id);
			if (status) {
				rte_sched_port_free(p->soft.tm.sched);
				return -1;
			}
		}
	}

	return 0;
}

void
tm_stop(struct pmd_internals *p)
{
	if (p->soft.tm.sched)
		rte_sched_port_free(p->soft.tm.sched);

	/* Unfreeze hierarchy */
	p->soft.tm.hierarchy_frozen = 0;
}
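
/*
 * Lifecycle sketch (illustrative, derived from the checks above): the
 * expected call order is tm_init() -> rte_tm_* hierarchy construction ->
 * rte_tm_hierarchy_commit() (which freezes the hierarchy) -> tm_start()
 * -> traffic -> tm_stop() -> tm_free(). tm_start() refuses to run on an
 * unfrozen hierarchy, and tm_stop() unfreezes it again.
 */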
static struct tm_shaper_profile *
tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
	struct tm_shaper_profile *sp;

	TAILQ_FOREACH(sp, spl, node)
		if (shaper_profile_id == sp->shaper_profile_id)
			return sp;

	return NULL;
}

static struct tm_shared_shaper *
tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
	struct tm_shared_shaper *ss;

	TAILQ_FOREACH(ss, ssl, node)
		if (shared_shaper_id == ss->shared_shaper_id)
			return ss;

	return NULL;
}

static struct tm_wred_profile *
tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
	struct tm_wred_profile *wp;

	TAILQ_FOREACH(wp, wpl, node)
		if (wred_profile_id == wp->wred_profile_id)
			return wp;

	return NULL;
}

static struct tm_node *
tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node)
		if (n->node_id == node_id)
			return n;

	return NULL;
}

static struct tm_node *
tm_root_node_present(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node)
		if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
			return n;

	return NULL;
}
static uint32_t
tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *ns;
	uint32_t subport_id;

	subport_id = 0;
	TAILQ_FOREACH(ns, nl, node) {
		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		if (ns->node_id == subport_node->node_id)
			return subport_id;

		subport_id++;
	}

	return UINT32_MAX;
}

static uint32_t
tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *np;
	uint32_t pipe_id;

	pipe_id = 0;
	TAILQ_FOREACH(np, nl, node) {
		if (np->level != TM_NODE_LEVEL_PIPE ||
			np->parent_node_id != pipe_node->parent_node_id)
			continue;

		if (np->node_id == pipe_node->node_id)
			return pipe_id;

		pipe_id++;
	}

	return UINT32_MAX;
}

static uint32_t
tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
{
	return tc_node->priority;
}

static uint32_t
tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *nq;
	uint32_t queue_id;

	queue_id = 0;
	TAILQ_FOREACH(nq, nl, node) {
		if (nq->level != TM_NODE_LEVEL_QUEUE ||
			nq->parent_node_id != queue_node->parent_node_id)
			continue;

		if (nq->node_id == queue_node->node_id)
			return queue_id;

		queue_id++;
	}

	return UINT32_MAX;
}
static uint32_t
tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t n_queues_max = p->params.soft.tm.nb_queues;
	uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
	uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
	uint32_t n_subports_max = n_pipes_max;
	uint32_t n_root_max = 1;

	switch (level) {
	case TM_NODE_LEVEL_PORT:
		return n_root_max;
	case TM_NODE_LEVEL_SUBPORT:
		return n_subports_max;
	case TM_NODE_LEVEL_PIPE:
		return n_pipes_max;
	case TM_NODE_LEVEL_TC:
		return n_tc_max;
	case TM_NODE_LEVEL_QUEUE:
	default:
		return n_queues_max;
	}
}
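
/*
 * Worked example (sketch, assuming the default 4 queues per TC and 4 TCs
 * per pipe): with nb_queues = 4096, n_tc_max = 4096 / 4 = 1024,
 * n_pipes_max = 1024 / 4 = 256 and n_subports_max = 256, while the port
 * level always holds exactly one node.
 */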
/* Traffic manager node type get */
static int
pmd_tm_node_type_get(struct rte_eth_dev *dev,
	uint32_t node_id,
	int *is_leaf,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	if (is_leaf == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	if (node_id == RTE_TM_NODE_ID_NULL ||
		(tm_node_search(dev, node_id) == NULL))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	*is_leaf = node_id < p->params.soft.tm.nb_queues;

	return 0;
}
#ifdef RTE_SCHED_RED
#define WRED_SUPPORTED						1
#else
#define WRED_SUPPORTED						0
#endif
#define STATS_MASK_DEFAULT					\
	(RTE_TM_STATS_N_PKTS |					\
	RTE_TM_STATS_N_BYTES |					\
	RTE_TM_STATS_N_PKTS_GREEN_DROPPED |			\
	RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

#define STATS_MASK_QUEUE					\
	(STATS_MASK_DEFAULT |					\
	RTE_TM_STATS_N_PKTS_QUEUED)
static const struct rte_tm_capabilities tm_cap = {
	.n_nodes_max = UINT32_MAX,
	.n_levels_max = TM_NODE_LEVEL_MAX,

	.non_leaf_nodes_identical = 0,
	.leaf_nodes_identical = 1,

	.shaper_n_max = UINT32_MAX,
	.shaper_private_n_max = UINT32_MAX,
	.shaper_private_dual_rate_n_max = 0,
	.shaper_private_rate_min = 1,
	.shaper_private_rate_max = UINT32_MAX,

	.shaper_shared_n_max = UINT32_MAX,
	.shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
	.shaper_shared_n_shapers_per_node_max = 1,
	.shaper_shared_dual_rate_n_max = 0,
	.shaper_shared_rate_min = 1,
	.shaper_shared_rate_max = UINT32_MAX,

	.shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
	.shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,

	.sched_n_children_max = UINT32_MAX,
	.sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
	.sched_wfq_n_children_per_group_max = UINT32_MAX,
	.sched_wfq_n_groups_max = 1,
	.sched_wfq_weight_max = UINT32_MAX,

	.cman_head_drop_supported = 0,
	.cman_wred_context_n_max = 0,
	.cman_wred_context_private_n_max = 0,
	.cman_wred_context_shared_n_max = 0,
	.cman_wred_context_shared_n_nodes_per_context_max = 0,
	.cman_wred_context_shared_n_contexts_per_node_max = 0,

	.mark_vlan_dei_supported = {0, 0, 0},
	.mark_ip_ecn_tcp_supported = {0, 0, 0},
	.mark_ip_ecn_sctp_supported = {0, 0, 0},
	.mark_ip_dscp_supported = {0, 0, 0},

	.dynamic_update_mask = 0,

	.stats_mask = STATS_MASK_QUEUE,
};
/* Traffic manager capabilities get */
static int
pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused,
	struct rte_tm_capabilities *cap,
	struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_cap, sizeof(*cap));

	cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

	cap->shaper_private_n_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);

	cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);

	cap->shaper_n_max = cap->shaper_private_n_max +
		cap->shaper_shared_n_max;

	cap->shaper_shared_n_nodes_per_shaper_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);

	cap->sched_n_children_max = RTE_MAX(
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
		(uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);

	cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;

	if (WRED_SUPPORTED)
		cap->cman_wred_context_private_n_max =
			tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

	cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
		cap->cman_wred_context_shared_n_max;

	return 0;
}
static const struct rte_tm_level_capabilities tm_level_cap[] = {
	[TM_NODE_LEVEL_PORT] = {
		.n_nodes_max = 1,
		.n_nodes_nonleaf_max = 1,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 0,

			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = 1,

			.stats_mask = STATS_MASK_DEFAULT,
		},
	},

	[TM_NODE_LEVEL_SUBPORT] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 0,

			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
#ifdef RTE_SCHED_SUBPORT_TC_OV
			.sched_wfq_weight_max = UINT32_MAX,
#else
			.sched_wfq_weight_max = 1,
#endif

			.stats_mask = STATS_MASK_DEFAULT,
		},
	},

	[TM_NODE_LEVEL_PIPE] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 0,

			.sched_n_children_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_sp_n_priorities_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_wfq_n_children_per_group_max = 1,
			.sched_wfq_n_groups_max = 0,
			.sched_wfq_weight_max = 1,

			.stats_mask = STATS_MASK_DEFAULT,
		},
	},

	[TM_NODE_LEVEL_TC] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 1,

			.sched_n_children_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,

			.stats_mask = STATS_MASK_DEFAULT,
		},
	},

	[TM_NODE_LEVEL_QUEUE] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = 0,
		.n_nodes_leaf_max = UINT32_MAX,
		.non_leaf_nodes_identical = 0,
		.leaf_nodes_identical = 1,

		.leaf = {
			.shaper_private_supported = 0,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 0,
			.shaper_private_rate_max = 0,
			.shaper_shared_n_max = 0,

			.cman_head_drop_supported = 0,
			.cman_wred_context_private_supported = WRED_SUPPORTED,
			.cman_wred_context_shared_n_max = 0,

			.stats_mask = STATS_MASK_QUEUE,
		},
	},
};
/* Traffic manager level capabilities get */
static int
pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
	uint32_t level_id,
	struct rte_tm_level_capabilities *cap,
	struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	if (level_id >= TM_NODE_LEVEL_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));

	switch (level_id) {
	case TM_NODE_LEVEL_PORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_SUBPORT);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_SUBPORT);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_PIPE);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_PIPE:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_PIPE);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		break;

	case TM_NODE_LEVEL_TC:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_TC);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		break;

	case TM_NODE_LEVEL_QUEUE:
	default:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_QUEUE);
		cap->n_nodes_leaf_max = cap->n_nodes_max;
		break;
	}

	return 0;
}
static const struct rte_tm_node_capabilities tm_node_cap[] = {
	[TM_NODE_LEVEL_PORT] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 0,

		.nonleaf = {
			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = 1,
		},

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_SUBPORT] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 0,

		.nonleaf = {
			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
		},

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_PIPE] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 0,

		.nonleaf = {
			.sched_n_children_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_sp_n_priorities_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_wfq_n_children_per_group_max = 1,
			.sched_wfq_n_groups_max = 0,
			.sched_wfq_weight_max = 1,
		},

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_TC] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 1,

		.nonleaf = {
			.sched_n_children_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
		},

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_QUEUE] = {
		.shaper_private_supported = 0,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 0,
		.shaper_private_rate_max = 0,
		.shaper_shared_n_max = 0,

		.leaf = {
			.cman_head_drop_supported = 0,
			.cman_wred_context_private_supported = WRED_SUPPORTED,
			.cman_wred_context_shared_n_max = 0,
		},

		.stats_mask = STATS_MASK_QUEUE,
	},
};
/* Traffic manager node capabilities get */
static int
pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
	uint32_t node_id,
	struct rte_tm_node_capabilities *cap,
	struct rte_tm_error *error)
{
	struct tm_node *tm_node;

	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	tm_node = tm_node_search(dev, node_id);
	if (tm_node == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));

	switch (tm_node->level) {
	case TM_NODE_LEVEL_PORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_SUBPORT);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_PIPE);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_PIPE:
	case TM_NODE_LEVEL_TC:
	case TM_NODE_LEVEL_QUEUE:
	default:
		break;
	}

	return 0;
}
static int
shaper_profile_check(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct tm_shaper_profile *sp;

	/* Shaper profile ID must not be NONE. */
	if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper profile must not exist. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak rate: non-zero, 32-bit */
	if (profile->peak.rate == 0 ||
		profile->peak.rate >= UINT32_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak size: non-zero, 32-bit */
	if (profile->peak.size == 0 ||
		profile->peak.size >= UINT32_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
			NULL,
			rte_strerror(EINVAL));

	/* Dual-rate profiles are not supported. */
	if (profile->committed.rate != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Packet length adjust: 24 bytes */
	if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
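
/*
 * Example (sketch) of a profile that passes the checks above: a single-rate
 * 1 Gbps shaper (rates are in bytes/sec, so 125000000) with a 1 MB token
 * bucket and the mandatory 24-byte framing overhead adjustment. The values
 * are illustrative only.
 *
 *	struct rte_tm_shaper_params sp = {
 *		.committed = { .rate = 0, .size = 0 },
 *		.peak = { .rate = 125000000, .size = 1000000 },
 *		.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
 *	};
 */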
/* Traffic manager shaper profile add */
static int
pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
	struct tm_shaper_profile *sp;
	int status;

	/* Check input params */
	status = shaper_profile_check(dev, shaper_profile_id, profile, error);
	if (status)
		return status;

	/* Memory allocation */
	sp = calloc(1, sizeof(struct tm_shaper_profile));
	if (sp == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	sp->shaper_profile_id = shaper_profile_id;
	memcpy(&sp->params, profile, sizeof(sp->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(spl, sp, node);
	p->soft.tm.h.n_shaper_profiles++;

	return 0;
}
/* Traffic manager shaper profile delete */
static int
pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile *sp;

	/* Check existing */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (sp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
	p->soft.tm.h.n_shaper_profiles--;
	free(sp);

	return 0;
}
static struct tm_node *
tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
	struct tm_shared_shaper *ss)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	/* Subport: each TC uses shared shaper */
	TAILQ_FOREACH(n, nl, node) {
		if (n->level != TM_NODE_LEVEL_TC ||
			n->params.n_shared_shapers == 0 ||
			n->params.shared_shaper_id[0] != ss->shared_shaper_id)
			continue;

		return n;
	}

	return NULL;
}
static int
update_subport_tc_rate(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct tm_shared_shaper *ss,
	struct tm_shaper_profile *sp_new)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_subport_params subport_params;

	struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
		ss->shaper_profile_id);

	/* Derive new subport configuration. */
	memcpy(&subport_params,
		&p->soft.tm.params.subport_params[subport_id],
		sizeof(subport_params));
	subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;

	/* Update the subport configuration. */
	if (rte_sched_subport_config(p->soft.tm.sched,
		subport_id, &subport_params))
		return -1;

	/* Commit changes. */
	sp_old->n_users--;

	ss->shaper_profile_id = sp_new->shaper_profile_id;
	sp_new->n_users++;

	memcpy(&p->soft.tm.params.subport_params[subport_id],
		&subport_params,
		sizeof(subport_params));

	return 0;
}
/* Traffic manager shared shaper add/update */
static int
pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
	uint32_t shared_shaper_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper *ss;
	struct tm_shaper_profile *sp;
	struct tm_node *nt;

	/* Shaper profile must be valid. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/**
	 * Add new shared shaper
	 */
	ss = tm_shared_shaper_search(dev, shared_shaper_id);
	if (ss == NULL) {
		struct tm_shared_shaper_list *ssl =
			&p->soft.tm.h.shared_shapers;

		/* Hierarchy must not be frozen */
		if (p->soft.tm.hierarchy_frozen)
			return -rte_tm_error_set(error,
				EBUSY,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EBUSY));

		/* Memory allocation */
		ss = calloc(1, sizeof(struct tm_shared_shaper));
		if (ss == NULL)
			return -rte_tm_error_set(error,
				ENOMEM,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(ENOMEM));

		/* Fill in */
		ss->shared_shaper_id = shared_shaper_id;
		ss->shaper_profile_id = shaper_profile_id;

		/* Add to list */
		TAILQ_INSERT_TAIL(ssl, ss, node);
		p->soft.tm.h.n_shared_shapers++;

		return 0;
	}

	/**
	 * Update existing shared shaper
	 */
	/* Hierarchy must be frozen (run-time update) */
	if (p->soft.tm.hierarchy_frozen == 0)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Propagate change. */
	nt = tm_shared_shaper_get_tc(dev, ss);
	if (update_subport_tc_rate(dev, nt, ss, sp))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
/* Traffic manager shared shaper delete */
static int
pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
	uint32_t shared_shaper_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper *ss;

	/* Check existing */
	ss = tm_shared_shaper_search(dev, shared_shaper_id);
	if (ss == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (ss->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
	p->soft.tm.h.n_shared_shapers--;
	free(ss);

	return 0;
}
static int
wred_profile_check(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct tm_wred_profile *wp;
	enum rte_tm_color color;

	/* WRED profile ID must not be NONE. */
	if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* WRED profile must not exist. */
	wp = tm_wred_profile_search(dev, wred_profile_id);
	if (wp)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* min_th <= max_th, max_th > 0 */
	for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
		uint16_t min_th = profile->red_params[color].min_th;
		uint16_t max_th = profile->red_params[color].max_th;

		if (min_th > max_th || max_th == 0)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_WRED_PROFILE,
				NULL,
				rte_strerror(EINVAL));
	}

	return 0;
}
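
/*
 * Example (sketch) of per-color WRED thresholds that satisfy the loop
 * above; the numbers are arbitrary illustrations, not recommended values:
 *
 *	struct rte_tm_wred_params wp;
 *	enum rte_tm_color color;
 *
 *	for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
 *		wp.red_params[color].min_th = 32;
 *		wp.red_params[color].max_th = 64; // max_th > 0, min_th <= max_th
 *		wp.red_params[color].maxp_inv = 10;
 *		wp.red_params[color].wq_log2 = 9;
 *	}
 */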
/* Traffic manager WRED profile add */
static int
pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
	struct tm_wred_profile *wp;
	int status;

	/* Check input params */
	status = wred_profile_check(dev, wred_profile_id, profile, error);
	if (status)
		return status;

	/* Memory allocation */
	wp = calloc(1, sizeof(struct tm_wred_profile));
	if (wp == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	wp->wred_profile_id = wred_profile_id;
	memcpy(&wp->params, profile, sizeof(wp->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(wpl, wp, node);
	p->soft.tm.h.n_wred_profiles++;

	return 0;
}
/* Traffic manager WRED profile delete */
static int
pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile *wp;

	/* Check existing */
	wp = tm_wred_profile_search(dev, wred_profile_id);
	if (wp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (wp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
	p->soft.tm.h.n_wred_profiles--;
	free(wp);

	return 0;
}
static int
node_add_check_port(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
		params->shaper_profile_id);

	/* node type: non-leaf */
	if (node_id < p->params.soft.tm.nb_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid.
	 * Shaper profile peak rate must fit the configured port rate.
	 */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		sp == NULL ||
		sp->params.peak.rate > p->params.soft.tm.rate)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_subport(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.soft.tm.nb_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_pipe(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight __rte_unused,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.soft.tm.nb_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 4 */
	if (params->nonleaf.n_sp_priorities !=
		RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* WFQ mode must be byte mode */
	if (params->nonleaf.wfq_weight_mode != NULL &&
		params->nonleaf.wfq_weight_mode[0] != 0 &&
		params->nonleaf.wfq_weight_mode[1] != 0 &&
		params->nonleaf.wfq_weight_mode[2] != 0 &&
		params->nonleaf.wfq_weight_mode[3] != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_tc(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority __rte_unused,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.soft.tm.nb_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Single valid shared shaper */
	if (params->n_shared_shapers > 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	if (params->n_shared_shapers == 1 &&
		(params->shared_shaper_id == NULL ||
		(!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Number of priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_queue(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight __rte_unused,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: leaf */
	if (node_id >= p->params.soft.tm.nb_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* No shaper */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Congestion management must not be head drop */
	if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
			NULL,
			rte_strerror(EINVAL));

	/* Congestion management set to WRED */
	if (params->leaf.cman == RTE_TM_CMAN_WRED) {
		uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
		struct tm_wred_profile *wp = tm_wred_profile_search(dev,
			wred_profile_id);

		/* WRED profile (for private WRED context) must be valid */
		if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
			wp == NULL)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
				NULL,
				rte_strerror(EINVAL));

		/* No shared WRED contexts */
		if (params->leaf.wred.n_shared_wred_contexts != 0)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_QUEUE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct tm_node *pn;
	uint32_t level;
	int status;

	/* node_id, parent_node_id:
	 *    -node_id must not be RTE_TM_NODE_ID_NULL
	 *    -node_id must not be in use
	 *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
	 *        -root node must not exist
	 *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
	 *        -parent_node_id must be valid
	 */
	if (node_id == RTE_TM_NODE_ID_NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	if (tm_node_search(dev, node_id))
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EEXIST));

	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		pn = NULL;
		if (tm_root_node_present(dev))
			return -rte_tm_error_set(error,
				EEXIST,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EEXIST));
	} else {
		pn = tm_node_search(dev, parent_node_id);
		if (pn == NULL)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EINVAL));
	}

	/* priority: must be 0 .. 3 */
	if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* weight: must be 1 .. 255 */
	if (weight == 0 || weight >= UINT8_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* level_id: if valid, then
	 *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
	 *        -level_id must be zero
	 *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
	 *        -level_id must be parent level ID plus one
	 */
	level = (pn == NULL) ? 0 : pn->level + 1;
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* params: must not be NULL */
	if (params == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS,
			NULL,
			rte_strerror(EINVAL));

	/* params: per level checks */
	switch (level) {
	case TM_NODE_LEVEL_PORT:
		status = node_add_check_port(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		status = node_add_check_subport(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_PIPE:
		status = node_add_check_pipe(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_TC:
		status = node_add_check_tc(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_QUEUE:
		status = node_add_check_queue(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));
	}

	return 0;
}
/* Traffic manager node add */
static int
pmd_tm_node_add(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;
	uint32_t i;
	int status;

	/* Checks */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	status = node_add_check(dev, node_id, parent_node_id, priority, weight,
		level_id, params, error);
	if (status)
		return status;

	/* Memory allocation */
	n = calloc(1, sizeof(struct tm_node));
	if (n == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	n->node_id = node_id;
	n->parent_node_id = parent_node_id;
	n->priority = priority;
	n->weight = weight;

	if (parent_node_id != RTE_TM_NODE_ID_NULL) {
		n->parent_node = tm_node_search(dev, parent_node_id);
		n->level = n->parent_node->level + 1;
	}

	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
		n->shaper_profile = tm_shaper_profile_search(dev,
			params->shaper_profile_id);

	if (n->level == TM_NODE_LEVEL_QUEUE &&
		params->leaf.cman == RTE_TM_CMAN_WRED)
		n->wred_profile = tm_wred_profile_search(dev,
			params->leaf.wred.wred_profile_id);

	memcpy(&n->params, params, sizeof(n->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(nl, n, node);
	p->soft.tm.h.n_nodes++;

	/* Update dependencies */
	if (n->parent_node)
		n->parent_node->n_children++;

	if (n->shaper_profile)
		n->shaper_profile->n_users++;

	for (i = 0; i < params->n_shared_shapers; i++) {
		struct tm_shared_shaper *ss;

		ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
		ss->n_users++;
	}

	if (n->wred_profile)
		n->wred_profile->n_users++;

	p->soft.tm.h.n_tm_nodes[n->level]++;

	return 0;
}
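
/*
 * Illustrative usage (sketch, not driver code; the *_ID macros and param
 * structs below are hypothetical): nodes are added top-down, one level at
 * a time, with RTE_TM_NODE_LEVEL_ID_ANY letting the level be derived from
 * the parent. Leaf node IDs must be < nb_queues, non-leaf IDs >= nb_queues.
 *
 *	rte_tm_node_add(port_id, ROOT_ID, RTE_TM_NODE_ID_NULL, 0, 1,
 *		RTE_TM_NODE_LEVEL_ID_ANY, &port_params, &error);
 *	rte_tm_node_add(port_id, SUBPORT_ID, ROOT_ID, 0, 1,
 *		RTE_TM_NODE_LEVEL_ID_ANY, &subport_params, &error);
 *	rte_tm_node_add(port_id, PIPE_ID, SUBPORT_ID, 0, 1,
 *		RTE_TM_NODE_LEVEL_ID_ANY, &pipe_params, &error);
 *	rte_tm_node_add(port_id, TC_ID, PIPE_ID, tc_priority, 1,
 *		RTE_TM_NODE_LEVEL_ID_ANY, &tc_params, &error);
 *	rte_tm_node_add(port_id, QUEUE_ID, TC_ID, 0, wrr_weight,
 *		RTE_TM_NODE_LEVEL_ID_ANY, &queue_params, &error);
 */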
/* Traffic manager node delete */
static int
pmd_tm_node_delete(struct rte_eth_dev *dev,
	uint32_t node_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node *n;
	uint32_t i;

	/* Check hierarchy changes are currently allowed */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Check existing */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (n->n_children)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Update dependencies */
	p->soft.tm.h.n_tm_nodes[n->level]--;

	if (n->wred_profile)
		n->wred_profile->n_users--;

	for (i = 0; i < n->params.n_shared_shapers; i++) {
		struct tm_shared_shaper *ss;

		ss = tm_shared_shaper_search(dev,
			n->params.shared_shaper_id[i]);
		ss->n_users--;
	}

	if (n->shaper_profile)
		n->shaper_profile->n_users--;

	if (n->parent_node)
		n->parent_node->n_children--;

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
	p->soft.tm.h.n_nodes--;
	free(n);

	return 0;
}
static void
pipe_profile_build(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct rte_sched_pipe_params *pp)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_node *nt, *nq;

	memset(pp, 0, sizeof(*pp));

	/* Pipe */
	pp->tb_rate = np->shaper_profile->params.peak.rate;
	pp->tb_size = np->shaper_profile->params.peak.size;

	/* Traffic Class (TC) */
	pp->tc_period = PIPE_TC_PERIOD;

#ifdef RTE_SCHED_SUBPORT_TC_OV
	pp->tc_ov_weight = np->weight;
#endif

	TAILQ_FOREACH(nt, nl, node) {
		uint32_t queue_id = 0;

		if (nt->level != TM_NODE_LEVEL_TC ||
			nt->parent_node_id != np->node_id)
			continue;

		pp->tc_rate[nt->priority] =
			nt->shaper_profile->params.peak.rate;

		/* Queue */
		TAILQ_FOREACH(nq, nl, node) {
			uint32_t pipe_queue_id;

			if (nq->level != TM_NODE_LEVEL_QUEUE ||
				nq->parent_node_id != nt->node_id)
				continue;

			pipe_queue_id = nt->priority *
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
			pp->wrr_weights[pipe_queue_id] = nq->weight;

			queue_id++;
		}
	}
}
static int
pipe_profile_free_exists(struct rte_eth_dev *dev,
	uint32_t *pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;

	if (t->n_pipe_profiles < RTE_SCHED_PIPE_PROFILES_PER_PORT) {
		*pipe_profile_id = t->n_pipe_profiles;
		return 1;
	}

	return 0;
}

static int
pipe_profile_exists(struct rte_eth_dev *dev,
	struct rte_sched_pipe_params *pp,
	uint32_t *pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;
	uint32_t i;

	for (i = 0; i < t->n_pipe_profiles; i++)
		if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
			if (pipe_profile_id)
				*pipe_profile_id = i;
			return 1;
		}

	return 0;
}

static void
pipe_profile_install(struct rte_eth_dev *dev,
	struct rte_sched_pipe_params *pp,
	uint32_t pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;

	memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
	t->n_pipe_profiles++;
}

static void
pipe_profile_mark(struct rte_eth_dev *dev,
	uint32_t subport_id,
	uint32_t pipe_id,
	uint32_t pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_params *t = &p->soft.tm.params;
	uint32_t n_pipes_per_subport, pos;

	n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
	pos = subport_id * n_pipes_per_subport + pipe_id;

	t->pipe_to_profile[pos] = pipe_profile_id;
}
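
/*
 * Indexing example (sketch): with 2 subports of 8 pipes each,
 * n_pipes_per_subport = 16 / 2 = 8, so subport 1 / pipe 3 maps to
 * pos = 1 * 8 + 3 = 11 in pipe_to_profile[].
 */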
static struct rte_sched_pipe_params *
pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_params *t = &p->soft.tm.params;
	uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

	uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
	uint32_t pipe_profile_id = t->pipe_to_profile[pos];

	return &t->pipe_profiles[pipe_profile_id];
}
static int
pipe_profiles_generate(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_node *ns, *np;
	uint32_t subport_id;

	/* Objective: Fill in the following fields in struct tm_params:
	 *    - pipe_profiles
	 *    - n_pipe_profiles
	 *    - pipe_to_profile
	 */
	subport_id = 0;
	TAILQ_FOREACH(ns, nl, node) {
		uint32_t pipe_id;

		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		pipe_id = 0;
		TAILQ_FOREACH(np, nl, node) {
			struct rte_sched_pipe_params pp;
			uint32_t pos;

			if (np->level != TM_NODE_LEVEL_PIPE ||
				np->parent_node_id != ns->node_id)
				continue;

			pipe_profile_build(dev, np, &pp);

			if (!pipe_profile_exists(dev, &pp, &pos)) {
				if (!pipe_profile_free_exists(dev, &pos))
					return -1;

				pipe_profile_install(dev, &pp, pos);
			}

			pipe_profile_mark(dev, subport_id, pipe_id, pos);

			pipe_id++;
		}

		subport_id++;
	}

	return 0;
}
static struct tm_wred_profile *
tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_node *nq;

	TAILQ_FOREACH(nq, nl, node) {
		if (nq->level != TM_NODE_LEVEL_QUEUE ||
			nq->parent_node->priority != tc_id)
			continue;

		return nq->wred_profile;
	}

	return NULL;
}
#ifdef RTE_SCHED_RED

static void
wred_profiles_set(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
	uint32_t tc_id;
	enum rte_tm_color color;

	for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
		for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
			struct rte_red_params *dst =
				&pp->red_params[tc_id][color];
			struct tm_wred_profile *src_wp =
				tm_tc_wred_profile_get(dev, tc_id);
			struct rte_tm_red_params *src =
				&src_wp->params.red_params[color];

			memcpy(dst, src, sizeof(*dst));
		}
}

#else

#define wred_profiles_set(dev)

#endif
static struct tm_shared_shaper *
tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
{
	return (tc_node->params.n_shared_shapers) ?
		tm_shared_shaper_search(dev,
			tc_node->params.shared_shaper_id[0]) :
		NULL;
}

static struct tm_shared_shaper *
tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
	struct tm_node *subport_node,
	uint32_t tc_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node) {
		if (n->level != TM_NODE_LEVEL_TC ||
			n->parent_node->parent_node_id !=
				subport_node->node_id ||
			n->priority != tc_id)
			continue;

		return tm_tc_shared_shaper_get(dev, n);
	}

	return NULL;
}
static int
hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_shared_shaper_list *ssl = &h->shared_shapers;
	struct tm_wred_profile_list *wpl = &h->wred_profiles;
	struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
	struct tm_shared_shaper *ss;

	uint32_t n_pipes_per_subport;

	/* Root node exists. */
	if (nr == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* There is at least one subport, max is not exceeded. */
	if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* There is at least one pipe. */
	if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Number of pipes is the same for all subports. Maximum number of pipes
	 * per subport is not exceeded.
	 */
	n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

	if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	TAILQ_FOREACH(ns, nl, node) {
		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		if (ns->n_children != n_pipes_per_subport)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Each pipe has exactly 4 TCs, with exactly one TC for each priority */
	TAILQ_FOREACH(np, nl, node) {
		uint32_t mask = 0, mask_expected =
			RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
				uint32_t);

		if (np->level != TM_NODE_LEVEL_PIPE)
			continue;

		if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));

		TAILQ_FOREACH(nt, nl, node) {
			if (nt->level != TM_NODE_LEVEL_TC ||
				nt->parent_node_id != np->node_id)
				continue;

			mask |= 1 << nt->priority;
		}

		if (mask != mask_expected)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Each TC has exactly 4 packet queues. */
	TAILQ_FOREACH(nt, nl, node) {
		if (nt->level != TM_NODE_LEVEL_TC)
			continue;

		if (nt->n_children != RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
	}

	/**
	 * Shared shapers:
	 *    -For each TC #i, all pipes in the same subport use the same
	 *     shared shaper (or no shared shaper) for their TC#i.
	 *    -Each shared shaper needs to have at least one user. All its
	 *     users have to be TC nodes with the same priority and the same
	 *     subport.
	 */
	TAILQ_FOREACH(ns, nl, node) {
		struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
		uint32_t id;

		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
			s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);

		TAILQ_FOREACH(nt, nl, node) {
			struct tm_shared_shaper *subport_ss, *tc_ss;

			if (nt->level != TM_NODE_LEVEL_TC ||
				nt->parent_node->parent_node_id !=
					ns->node_id)
				continue;

			subport_ss = s[nt->priority];
			tc_ss = tm_tc_shared_shaper_get(dev, nt);

			if (subport_ss == NULL && tc_ss == NULL)
				continue;

			if ((subport_ss == NULL && tc_ss != NULL) ||
				(subport_ss != NULL && tc_ss == NULL) ||
				subport_ss->shared_shaper_id !=
					tc_ss->shared_shaper_id)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));
		}
	}

	TAILQ_FOREACH(ss, ssl, node) {
		struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
		uint32_t n_users = 0;

		if (nt_any != NULL)
			TAILQ_FOREACH(nt, nl, node) {
				if (nt->level != TM_NODE_LEVEL_TC ||
					nt->priority != nt_any->priority ||
					nt->parent_node->parent_node_id !=
						nt_any->parent_node->parent_node_id)
					continue;

				n_users++;
			}

		if (ss->n_users == 0 || ss->n_users != n_users)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Not too many pipe profiles. */
	if (pipe_profiles_generate(dev))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	/**
	 * WRED (when used, i.e. at least one WRED profile defined):
	 *    -Each WRED profile must have at least one user.
	 *    -All leaf nodes must have their private WRED context enabled.
	 *    -For each TC #i, all leaf nodes must use the same WRED profile
	 *     for their private WRED context.
	 */
	if (h->n_wred_profiles) {
		struct tm_wred_profile *wp;
		struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
		uint32_t id;

		TAILQ_FOREACH(wp, wpl, node)
			if (wp->n_users == 0)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));

		for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
			w[id] = tm_tc_wred_profile_get(dev, id);

			if (w[id] == NULL)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));
		}

		TAILQ_FOREACH(nq, nl, node) {
			uint32_t id;

			if (nq->level != TM_NODE_LEVEL_QUEUE)
				continue;

			id = nq->parent_node->priority;

			if (nq->wred_profile == NULL ||
				nq->wred_profile->wred_profile_id !=
					w[id]->wred_profile_id)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));
		}
	}

	return 0;
}
static void
hierarchy_blueprints_create(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;
	struct tm_hierarchy *h = &p->soft.tm.h;

	struct tm_node_list *nl = &h->nodes;
	struct tm_node *root = tm_root_node_present(dev), *n;

	uint32_t subport_id;

	t->port_params = (struct rte_sched_port_params) {
		.name = dev->data->name,
		.socket = dev->data->numa_node,
		.rate = root->shaper_profile->params.peak.rate,
		.mtu = dev->data->mtu,
		.frame_overhead =
			root->shaper_profile->params.pkt_length_adjust,
		.n_subports_per_port = root->n_children,
		.n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
			h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
		.qsize = {p->params.soft.tm.qsize[0],
			p->params.soft.tm.qsize[1],
			p->params.soft.tm.qsize[2],
			p->params.soft.tm.qsize[3],
		},
		.pipe_profiles = t->pipe_profiles,
		.n_pipe_profiles = t->n_pipe_profiles,
	};

	wred_profiles_set(dev);

	subport_id = 0;
	TAILQ_FOREACH(n, nl, node) {
		uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
		uint32_t i;

		if (n->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
			struct tm_shared_shaper *ss;
			struct tm_shaper_profile *sp;

			ss = tm_subport_tc_shared_shaper_get(dev, n, i);
			sp = (ss) ? tm_shaper_profile_search(dev,
				ss->shaper_profile_id) :
				n->shaper_profile;
			tc_rate[i] = sp->params.peak.rate;
		}

		t->subport_params[subport_id] =
			(struct rte_sched_subport_params) {
				.tb_rate = n->shaper_profile->params.peak.rate,
				.tb_size = n->shaper_profile->params.peak.size,

				.tc_rate = {tc_rate[0],
					tc_rate[1],
					tc_rate[2],
					tc_rate[3],
				},
				.tc_period = SUBPORT_TC_PERIOD,
		};

		subport_id++;
	}
}
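/*
 * Editor's note (illustrative arithmetic, not from the original source):
 * n_pipes_per_subport above is derived purely from node counts. For example,
 * 8192 pipe nodes spread over 2 subport nodes yield 8192 / 2 = 4096 pipes
 * per subport; this assumes (as the commit checks are meant to enforce) that
 * every subport holds the same number of pipes, so the division is exact.
 */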
/* Traffic manager hierarchy commit */
static int
pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
	int clear_on_fail,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	int status;

	/* Checks */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error, EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
			rte_strerror(EBUSY));

	status = hierarchy_commit_check(dev, error);
	if (status) {
		if (clear_on_fail) {
			tm_hierarchy_uninit(p);
			tm_hierarchy_init(p);
		}

		return status;
	}

	/* Create blueprints */
	hierarchy_blueprints_create(dev);

	/* Freeze hierarchy */
	p->soft.tm.hierarchy_frozen = 1;

	return 0;
}
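/*
 * Editor's sketch (not part of the driver): an application reaches the
 * commit handler above through the generic rte_tm API. Assuming port_id
 * identifies a softnic port whose hierarchy nodes have all been added:
 *
 *	struct rte_tm_error err;
 *
 *	// clear_on_fail = 1: ask the driver to wipe the hierarchy on error.
 *	if (rte_tm_hierarchy_commit(port_id, 1, &err))
 *		printf("TM commit failed: %s\n", err.message);
 *
 * A second commit on the same port fails with EBUSY, since the hierarchy
 * is frozen above.
 */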
#ifdef RTE_SCHED_SUBPORT_TC_OV

static int
update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tc_ov_weight = (uint8_t)weight;

	/* Since implementation does not allow adding more pipe profiles after
	 * port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	np->weight = weight;

	return 0;
}

#endif
static int
update_queue_weight(struct rte_eth_dev *dev,
	struct tm_node *nq, uint32_t weight)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t queue_id = tm_node_queue_id(dev, nq);

	struct tm_node *nt = nq->parent_node;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	uint32_t pipe_queue_id =
		tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.wrr_weights[pipe_queue_id] = (uint8_t)weight;

	/* Since implementation does not allow adding more pipe profiles after
	 * port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	nq->weight = weight;

	return 0;
}
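/*
 * Editor's note (worked example, not from the original source): with the
 * RTE_SCHED default of 4 queues per traffic class, the pipe_queue_id
 * computed above linearizes (tc_id, queue_id) within one pipe, e.g.
 * tc_id = 2, queue_id = 1 selects wrr_weights[2 * 4 + 1] = wrr_weights[9].
 */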
/* Traffic manager node parent update */
static int
pmd_tm_node_parent_update(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	struct rte_tm_error *error)
{
	struct tm_node *n;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error, EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID, NULL,
			rte_strerror(EINVAL));

	/* Parent node must be the same */
	if (n->parent_node_id != parent_node_id)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID, NULL,
			rte_strerror(EINVAL));

	/* Priority must be the same */
	if (n->priority != priority)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY, NULL,
			rte_strerror(EINVAL));

	/* weight: must be 1 .. 255 */
	if (weight == 0 || weight >= UINT8_MAX)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT, NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT, NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_SUBPORT:
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT, NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_PIPE:
#ifdef RTE_SCHED_SUBPORT_TC_OV
		if (update_pipe_weight(dev, n, weight))
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
				rte_strerror(EINVAL));
		return 0;
#else
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT, NULL,
			rte_strerror(EINVAL));
#endif
		/* fall-through */
	case TM_NODE_LEVEL_TC:
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT, NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_QUEUE:
		/* fall-through */
	default:
		if (update_queue_weight(dev, n, weight))
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
				rte_strerror(EINVAL));
		return 0;
	}
}
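/*
 * Editor's sketch (not part of the driver): the handler above only allows
 * the WFQ weight to change; parent and priority must be re-stated unchanged.
 * Assuming queue_node_id is a leaf node added earlier with parent
 * tc_node_id and priority 0:
 *
 *	struct rte_tm_error err;
 *
 *	// Same parent, same priority, new weight of 32.
 *	if (rte_tm_node_parent_update(port_id, queue_node_id,
 *			tc_node_id, 0, 32, &err))
 *		printf("weight update failed: %s\n", err.message);
 */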
static int
update_subport_rate(struct rte_eth_dev *dev,
	struct tm_node *ns,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_subport_params subport_params;

	/* Derive new subport configuration. */
	memcpy(&subport_params,
		&p->soft.tm.params.subport_params[subport_id],
		sizeof(subport_params));
	subport_params.tb_rate = sp->params.peak.rate;
	subport_params.tb_size = sp->params.peak.size;

	/* Update the subport configuration. */
	if (rte_sched_subport_config(p->soft.tm.sched, subport_id,
		&subport_params))
		return -1;

	/* Commit changes. */
	ns->shaper_profile->n_users--;

	ns->shaper_profile = sp;
	ns->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	memcpy(&p->soft.tm.params.subport_params[subport_id],
		&subport_params,
		sizeof(subport_params));

	return 0;
}
static int
update_pipe_rate(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tb_rate = sp->params.peak.rate;
	profile1.tb_size = sp->params.peak.size;

	/* Since implementation does not allow adding more pipe profiles after
	 * port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	np->shaper_profile->n_users--;
	np->shaper_profile = sp;
	np->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	return 0;
}
static int
update_tc_rate(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tc_rate[tc_id] = sp->params.peak.rate;

	/* Since implementation does not allow adding more pipe profiles after
	 * port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	nt->shaper_profile->n_users--;
	nt->shaper_profile = sp;
	nt->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	return 0;
}
/* Traffic manager node shaper update */
static int
pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct tm_node *n;
	struct tm_shaper_profile *sp;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error, EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID, NULL,
			rte_strerror(EINVAL));

	/* Shaper profile must be valid. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE, NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_SUBPORT:
		if (update_subport_rate(dev, n, sp))
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_PIPE:
		if (update_pipe_rate(dev, n, sp))
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_TC:
		if (update_tc_rate(dev, n, sp))
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_QUEUE:
		/* fall-through */
	default:
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
			rte_strerror(EINVAL));
	}
}
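/*
 * Editor's sketch (not part of the driver): runtime rate updates go through
 * the generic API. Assuming fast_profile_id was registered earlier with
 * rte_tm_shaper_profile_add() and pipe_node_id is a pipe-level node:
 *
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_node_shaper_update(port_id, pipe_node_id,
 *			fast_profile_id, &err))
 *		printf("shaper update failed: %s\n", err.message);
 *
 * Per the pipe-profile comments above, the update succeeds only if a pipe
 * profile with the resulting token bucket settings already exists.
 */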
static inline uint32_t
tm_port_queue_id(struct rte_eth_dev *dev,
	uint32_t port_subport_id,
	uint32_t subport_pipe_id,
	uint32_t pipe_tc_id,
	uint32_t tc_queue_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

	uint32_t port_pipe_id =
		port_subport_id * n_pipes_per_subport + subport_pipe_id;
	uint32_t port_tc_id =
		port_pipe_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + pipe_tc_id;
	uint32_t port_queue_id =
		port_tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_queue_id;

	return port_queue_id;
}
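/*
 * Editor's note (worked example, not from the original source): assuming
 * 4 traffic classes per pipe, 4 queues per traffic class and 2 pipes per
 * subport, the tuple (subport 1, pipe 0, tc 2, queue 3) maps to:
 *
 *	port_pipe_id  = 1 * 2 + 0  = 2
 *	port_tc_id    = 2 * 4 + 2  = 10
 *	port_queue_id = 10 * 4 + 3 = 43
 */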
static int
read_port_stats(struct rte_eth_dev *dev,
	struct tm_node *nr,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
	uint32_t subport_id;

	for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
		struct rte_sched_subport_stats s;
		uint32_t tc_ov, id;

		/* Stats read */
		int status = rte_sched_subport_read_stats(p->soft.tm.sched,
			subport_id, &s, &tc_ov);
		if (status)
			return status;

		/* Stats accumulate */
		for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
			nr->stats.n_pkts +=
				s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
			nr->stats.n_bytes +=
				s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
			nr->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
				s.n_pkts_tc_dropped[id];
			nr->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
				s.n_bytes_tc_dropped[id];
		}
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &nr->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&nr->stats, 0, sizeof(nr->stats));

	return 0;
}
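/*
 * Editor's note (observation, not from the original source): the per-TC
 * drop counters returned by rte_sched_subport_read_stats() carry no packet
 * color, which is why this function (and the per-level readers below)
 * attribute all drops to RTE_TM_GREEN.
 */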
static int
read_subport_stats(struct rte_eth_dev *dev,
	struct tm_node *ns,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	struct rte_sched_subport_stats s;
	uint32_t tc_ov, tc_id;

	/* Stats read */
	int status = rte_sched_subport_read_stats(p->soft.tm.sched,
		subport_id, &s, &tc_ov);
	if (status)
		return status;

	/* Stats accumulate */
	for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
		ns->stats.n_pkts +=
			s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
		ns->stats.n_bytes +=
			s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
		ns->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
			s.n_pkts_tc_dropped[tc_id];
		ns->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
			s.n_bytes_tc_dropped[tc_id];
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &ns->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&ns->stats, 0, sizeof(ns->stats));

	return 0;
}
static int
read_pipe_stats(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	uint32_t i;

	/* Stats read */
	for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
		struct rte_sched_queue_stats s;
		uint16_t qlen;

		uint32_t qid = tm_port_queue_id(dev, subport_id, pipe_id,
			i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);

		int status = rte_sched_queue_read_stats(p->soft.tm.sched,
			qid, &s, &qlen);
		if (status)
			return status;

		/* Stats accumulate */
		np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
		np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
		np->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
		np->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
			s.n_bytes_dropped;
		np->stats.leaf.n_pkts_queued = qlen;
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &np->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&np->stats, 0, sizeof(np->stats));

	return 0;
}
static int
read_tc_stats(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	uint32_t i;

	/* Stats read */
	for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
		struct rte_sched_queue_stats s;
		uint16_t qlen;

		uint32_t qid = tm_port_queue_id(dev, subport_id, pipe_id,
			tc_id, i);

		int status = rte_sched_queue_read_stats(p->soft.tm.sched,
			qid, &s, &qlen);
		if (status)
			return status;

		/* Stats accumulate */
		nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
		nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
		nt->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
		nt->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
			s.n_bytes_dropped;
		nt->stats.leaf.n_pkts_queued = qlen;
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &nt->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&nt->stats, 0, sizeof(nt->stats));

	return 0;
}
static int
read_queue_stats(struct rte_eth_dev *dev,
	struct tm_node *nq,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct rte_sched_queue_stats s;
	uint16_t qlen;

	uint32_t queue_id = tm_node_queue_id(dev, nq);

	struct tm_node *nt = nq->parent_node;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	/* Stats read */
	uint32_t qid = tm_port_queue_id(dev, subport_id, pipe_id,
		tc_id, queue_id);

	int status = rte_sched_queue_read_stats(p->soft.tm.sched,
		qid, &s, &qlen);
	if (status)
		return status;

	/* Stats accumulate */
	nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
	nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
	nq->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
	nq->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
		s.n_bytes_dropped;
	nq->stats.leaf.n_pkts_queued = qlen;

	/* Stats copy */
	if (stats)
		memcpy(stats, &nq->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_QUEUE;

	/* Stats clear */
	if (clear)
		memset(&nq->stats, 0, sizeof(nq->stats));

	return 0;
}
/* Traffic manager read stats counters for specific node */
static int
pmd_tm_node_stats_read(struct rte_eth_dev *dev,
	uint32_t node_id,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear,
	struct rte_tm_error *error)
{
	struct tm_node *n;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error, EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error, EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID, NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		if (read_port_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_SUBPORT:
		if (read_subport_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_PIPE:
		if (read_pipe_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_TC:
		if (read_tc_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_QUEUE:
		/* fall-through */
	default:
		if (read_queue_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error, EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
				rte_strerror(EINVAL));
		return 0;
	}
}
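/*
 * Editor's sketch (not part of the driver): reading and clearing the
 * counters of a single leaf node through the generic API. queue_node_id
 * is assumed to be a queue-level node:
 *
 *	struct rte_tm_node_stats qs;
 *	struct rte_tm_error err;
 *	uint64_t mask;
 *
 *	// clear = 1: counters are reset after being read.
 *	if (rte_tm_node_stats_read(port_id, queue_node_id,
 *			&qs, &mask, 1, &err) == 0)
 *		printf("queued pkts: %" PRIu64 "\n", qs.leaf.n_pkts_queued);
 *
 * The returned mask (STATS_MASK_QUEUE here) tells the caller which fields
 * of the stats structure the driver actually filled in.
 */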
const struct rte_tm_ops pmd_tm_ops = {
	.node_type_get = pmd_tm_node_type_get,
	.capabilities_get = pmd_tm_capabilities_get,
	.level_capabilities_get = pmd_tm_level_capabilities_get,
	.node_capabilities_get = pmd_tm_node_capabilities_get,

	.wred_profile_add = pmd_tm_wred_profile_add,
	.wred_profile_delete = pmd_tm_wred_profile_delete,
	.shared_wred_context_add_update = NULL,
	.shared_wred_context_delete = NULL,

	.shaper_profile_add = pmd_tm_shaper_profile_add,
	.shaper_profile_delete = pmd_tm_shaper_profile_delete,
	.shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
	.shared_shaper_delete = pmd_tm_shared_shaper_delete,

	.node_add = pmd_tm_node_add,
	.node_delete = pmd_tm_node_delete,
	.node_suspend = NULL,
	.node_resume = NULL,
	.hierarchy_commit = pmd_tm_hierarchy_commit,

	.node_parent_update = pmd_tm_node_parent_update,
	.node_shaper_update = pmd_tm_node_shaper_update,
	.node_shared_shaper_update = NULL,
	.node_stats_update = NULL,
	.node_wfq_weight_mode_update = NULL,
	.node_cman_update = NULL,
	.node_wred_context_update = NULL,
	.node_shared_wred_context_update = NULL,

	.node_stats_read = pmd_tm_node_stats_read,
};
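/*
 * Editor's note (sketch, not from the original source): entries left NULL
 * above are operations this PMD does not implement; the rte_tm dispatch
 * layer is expected to reject a NULL op with an error (ENOSYS) before the
 * driver is ever called. For example, node suspend/resume is unsupported:
 *
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_node_suspend(port_id, node_id, &err))
 *		printf("suspend not supported: %s\n", err.message);
 */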