1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
9 #include <rte_malloc.h>
11 #include "rte_eth_softnic_internals.h"
12 #include "rte_eth_softnic.h"
14 #define BYTES_IN_MBPS (1000 * 1000 / 8)
15 #define SUBPORT_TC_PERIOD 10
16 #define PIPE_TC_PERIOD 40
19 tm_params_check(struct pmd_params *params, uint32_t hard_rate)
21 uint64_t hard_rate_bytes_per_sec = (uint64_t)hard_rate * BYTES_IN_MBPS;
25 if (params->soft.tm.rate) {
26 if (params->soft.tm.rate > hard_rate_bytes_per_sec)
29 params->soft.tm.rate =
30 (hard_rate_bytes_per_sec > UINT32_MAX) ?
31 UINT32_MAX : hard_rate_bytes_per_sec;
35 if (params->soft.tm.nb_queues == 0)
38 if (params->soft.tm.nb_queues < RTE_SCHED_QUEUES_PER_PIPE)
39 params->soft.tm.nb_queues = RTE_SCHED_QUEUES_PER_PIPE;
41 params->soft.tm.nb_queues =
42 rte_align32pow2(params->soft.tm.nb_queues);
45 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
46 if (params->soft.tm.qsize[i] == 0)
49 params->soft.tm.qsize[i] =
50 rte_align32pow2(params->soft.tm.qsize[i]);
53 /* enq_bsz, deq_bsz: both must be non-zero, with deq_bsz smaller than enq_bsz */
54 if (params->soft.tm.enq_bsz == 0 ||
55 params->soft.tm.deq_bsz == 0 ||
56 params->soft.tm.deq_bsz >= params->soft.tm.enq_bsz)
63 tm_hierarchy_init(struct pmd_internals *p)
65 memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
67 /* Initialize shaper profile list */
68 TAILQ_INIT(&p->soft.tm.h.shaper_profiles);
70 /* Initialize shared shaper list */
71 TAILQ_INIT(&p->soft.tm.h.shared_shapers);
73 /* Initialize WRED profile list */
74 TAILQ_INIT(&p->soft.tm.h.wred_profiles);
76 /* Initialize TM node list */
77 TAILQ_INIT(&p->soft.tm.h.nodes);
81 tm_hierarchy_uninit(struct pmd_internals *p)
85 struct tm_node *tm_node;
87 tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
91 TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
95 /* Remove all WRED profiles */
97 struct tm_wred_profile *wred_profile;
99 wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
100 if (wred_profile == NULL)
103 TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
107 /* Remove all shared shapers */
109 struct tm_shared_shaper *shared_shaper;
111 shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
112 if (shared_shaper == NULL)
115 TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
119 /* Remove all shaper profiles */
121 struct tm_shaper_profile *shaper_profile;
123 shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
124 if (shaper_profile == NULL)
127 TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
128 shaper_profile, node);
129 free(shaper_profile);
132 memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
136 tm_init(struct pmd_internals *p,
137 struct pmd_params *params,
140 uint32_t enq_bsz = params->soft.tm.enq_bsz;
141 uint32_t deq_bsz = params->soft.tm.deq_bsz;
143 p->soft.tm.pkts_enq = rte_zmalloc_socket(params->soft.name,
144 2 * enq_bsz * sizeof(struct rte_mbuf *),
148 if (p->soft.tm.pkts_enq == NULL)
151 p->soft.tm.pkts_deq = rte_zmalloc_socket(params->soft.name,
152 deq_bsz * sizeof(struct rte_mbuf *),
156 if (p->soft.tm.pkts_deq == NULL) {
157 rte_free(p->soft.tm.pkts_enq);
161 tm_hierarchy_init(p);
167 tm_free(struct pmd_internals *p)
169 tm_hierarchy_uninit(p);
170 rte_free(p->soft.tm.pkts_enq);
171 rte_free(p->soft.tm.pkts_deq);
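/* tm_start(): instantiate the librte_sched port from the committed hierarchy:
 * configure the port, then every subport, then every pipe with its
 * pre-computed pipe profile.
 */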
175 tm_start(struct pmd_internals *p)
177 struct tm_params *t = &p->soft.tm.params;
178 uint32_t n_subports, subport_id;
181 /* Hierarchy must be frozen (committed) before the scheduler port can be started */
182 if (p->soft.tm.hierarchy_frozen == 0)
186 p->soft.tm.sched = rte_sched_port_config(&t->port_params);
187 if (p->soft.tm.sched == NULL)
191 n_subports = t->port_params.n_subports_per_port;
192 for (subport_id = 0; subport_id < n_subports; subport_id++) {
193 uint32_t n_pipes_per_subport =
194 t->port_params.n_pipes_per_subport;
197 status = rte_sched_subport_config(p->soft.tm.sched,
199 &t->subport_params[subport_id]);
201 rte_sched_port_free(p->soft.tm.sched);
206 n_pipes_per_subport = t->port_params.n_pipes_per_subport;
207 for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
208 int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
210 int profile_id = t->pipe_to_profile[pos];
215 status = rte_sched_pipe_config(p->soft.tm.sched,
220 rte_sched_port_free(p->soft.tm.sched);
230 tm_stop(struct pmd_internals *p)
232 if (p->soft.tm.sched)
233 rte_sched_port_free(p->soft.tm.sched);
235 /* Unfreeze hierarchy */
236 p->soft.tm.hierarchy_frozen = 0;
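/* Lookup helpers: linear search of the shaper profile, shared shaper,
 * WRED profile and node lists by object ID.
 */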
239 static struct tm_shaper_profile *
240 tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
242 struct pmd_internals *p = dev->data->dev_private;
243 struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
244 struct tm_shaper_profile *sp;
246 TAILQ_FOREACH(sp, spl, node)
247 if (shaper_profile_id == sp->shaper_profile_id)
253 static struct tm_shared_shaper *
254 tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
256 struct pmd_internals *p = dev->data->dev_private;
257 struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
258 struct tm_shared_shaper *ss;
260 TAILQ_FOREACH(ss, ssl, node)
261 if (shared_shaper_id == ss->shared_shaper_id)
267 static struct tm_wred_profile *
268 tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
270 struct pmd_internals *p = dev->data->dev_private;
271 struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
272 struct tm_wred_profile *wp;
274 TAILQ_FOREACH(wp, wpl, node)
275 if (wred_profile_id == wp->wred_profile_id)
281 static struct tm_node *
282 tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
284 struct pmd_internals *p = dev->data->dev_private;
285 struct tm_node_list *nl = &p->soft.tm.h.nodes;
288 TAILQ_FOREACH(n, nl, node)
289 if (n->node_id == node_id)
295 static struct tm_node *
296 tm_root_node_present(struct rte_eth_dev *dev)
298 struct pmd_internals *p = dev->data->dev_private;
299 struct tm_node_list *nl = &p->soft.tm.h.nodes;
302 TAILQ_FOREACH(n, nl, node)
303 if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
310 tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
312 struct pmd_internals *p = dev->data->dev_private;
313 struct tm_node_list *nl = &p->soft.tm.h.nodes;
318 TAILQ_FOREACH(ns, nl, node) {
319 if (ns->level != TM_NODE_LEVEL_SUBPORT)
322 if (ns->node_id == subport_node->node_id)
332 tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
334 struct pmd_internals *p = dev->data->dev_private;
335 struct tm_node_list *nl = &p->soft.tm.h.nodes;
340 TAILQ_FOREACH(np, nl, node) {
341 if (np->level != TM_NODE_LEVEL_PIPE ||
342 np->parent_node_id != pipe_node->parent_node_id)
345 if (np->node_id == pipe_node->node_id)
355 tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
357 return tc_node->priority;
361 tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
363 struct pmd_internals *p = dev->data->dev_private;
364 struct tm_node_list *nl = &p->soft.tm.h.nodes;
369 TAILQ_FOREACH(nq, nl, node) {
370 if (nq->level != TM_NODE_LEVEL_QUEUE ||
371 nq->parent_node_id != queue_node->parent_node_id)
374 if (nq->node_id == queue_node->node_id)
384 tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
386 struct pmd_internals *p = dev->data->dev_private;
387 uint32_t n_queues_max = p->params.soft.tm.nb_queues;
388 uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
389 uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
390 uint32_t n_subports_max = n_pipes_max;
391 uint32_t n_root_max = 1;
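	/* Queues per TC and TCs per pipe are fixed by librte_sched; a subport
	 * needs at least one pipe, so the subport bound equals the pipe bound.
	 */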
394 case TM_NODE_LEVEL_PORT:
396 case TM_NODE_LEVEL_SUBPORT:
397 return n_subports_max;
398 case TM_NODE_LEVEL_PIPE:
400 case TM_NODE_LEVEL_TC:
402 case TM_NODE_LEVEL_QUEUE:
408 /* Traffic manager node type get */
410 pmd_tm_node_type_get(struct rte_eth_dev *dev,
413 struct rte_tm_error *error)
415 struct pmd_internals *p = dev->data->dev_private;
418 return -rte_tm_error_set(error,
420 RTE_TM_ERROR_TYPE_UNSPECIFIED,
422 rte_strerror(EINVAL));
424 if (node_id == RTE_TM_NODE_ID_NULL ||
425 (tm_node_search(dev, node_id) == NULL))
426 return -rte_tm_error_set(error,
428 RTE_TM_ERROR_TYPE_NODE_ID,
430 rte_strerror(EINVAL));
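	/* By convention, node IDs 0 .. nb_queues-1 are reserved for leaf
	 * (queue) nodes, so a node is a leaf iff its ID is below the number
	 * of scheduler queues.
	 */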
432 *is_leaf = node_id < p->params.soft.tm.nb_queues;
438 #define WRED_SUPPORTED 1
440 #define WRED_SUPPORTED 0
443 #define STATS_MASK_DEFAULT \
444 (RTE_TM_STATS_N_PKTS | \
445 RTE_TM_STATS_N_BYTES | \
446 RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
447 RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
449 #define STATS_MASK_QUEUE \
450 (STATS_MASK_DEFAULT | \
451 RTE_TM_STATS_N_PKTS_QUEUED)
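/* Per-node statistics: packet/byte counts and green-drop counts for all nodes,
 * plus the number of packets currently queued for leaf nodes.
 */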
453 static const struct rte_tm_capabilities tm_cap = {
454 .n_nodes_max = UINT32_MAX,
455 .n_levels_max = TM_NODE_LEVEL_MAX,
457 .non_leaf_nodes_identical = 0,
458 .leaf_nodes_identical = 1,
460 .shaper_n_max = UINT32_MAX,
461 .shaper_private_n_max = UINT32_MAX,
462 .shaper_private_dual_rate_n_max = 0,
463 .shaper_private_rate_min = 1,
464 .shaper_private_rate_max = UINT32_MAX,
466 .shaper_shared_n_max = UINT32_MAX,
467 .shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
468 .shaper_shared_n_shapers_per_node_max = 1,
469 .shaper_shared_dual_rate_n_max = 0,
470 .shaper_shared_rate_min = 1,
471 .shaper_shared_rate_max = UINT32_MAX,
473 .shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
474 .shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
476 .sched_n_children_max = UINT32_MAX,
477 .sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
478 .sched_wfq_n_children_per_group_max = UINT32_MAX,
479 .sched_wfq_n_groups_max = 1,
480 .sched_wfq_weight_max = UINT32_MAX,
482 .cman_wred_packet_mode_supported = WRED_SUPPORTED,
483 .cman_wred_byte_mode_supported = 0,
484 .cman_head_drop_supported = 0,
485 .cman_wred_context_n_max = 0,
486 .cman_wred_context_private_n_max = 0,
487 .cman_wred_context_shared_n_max = 0,
488 .cman_wred_context_shared_n_nodes_per_context_max = 0,
489 .cman_wred_context_shared_n_contexts_per_node_max = 0,
491 .mark_vlan_dei_supported = {0, 0, 0},
492 .mark_ip_ecn_tcp_supported = {0, 0, 0},
493 .mark_ip_ecn_sctp_supported = {0, 0, 0},
494 .mark_ip_dscp_supported = {0, 0, 0},
496 .dynamic_update_mask = 0,
498 .stats_mask = STATS_MASK_QUEUE,
501 /* Traffic manager capabilities get */
503 pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused,
504 struct rte_tm_capabilities *cap,
505 struct rte_tm_error *error)
508 return -rte_tm_error_set(error,
510 RTE_TM_ERROR_TYPE_CAPABILITIES,
512 rte_strerror(EINVAL));
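	/* Start from the static capability set, then overwrite the fields that
	 * depend on the configured number of scheduler queues.
	 */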
514 memcpy(cap, &tm_cap, sizeof(*cap));
516 cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
517 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
518 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
519 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
520 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
522 cap->shaper_private_n_max =
523 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
524 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
525 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
526 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);
528 cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
529 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);
531 cap->shaper_n_max = cap->shaper_private_n_max +
532 cap->shaper_shared_n_max;
534 cap->shaper_shared_n_nodes_per_shaper_max =
535 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);
537 cap->sched_n_children_max = RTE_MAX(
538 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
539 (uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);
541 cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
544 cap->cman_wred_context_private_n_max =
545 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
547 cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
548 cap->cman_wred_context_shared_n_max;
553 static const struct rte_tm_level_capabilities tm_level_cap[] = {
554 [TM_NODE_LEVEL_PORT] = {
556 .n_nodes_nonleaf_max = 1,
557 .n_nodes_leaf_max = 0,
558 .non_leaf_nodes_identical = 1,
559 .leaf_nodes_identical = 0,
562 .shaper_private_supported = 1,
563 .shaper_private_dual_rate_supported = 0,
564 .shaper_private_rate_min = 1,
565 .shaper_private_rate_max = UINT32_MAX,
566 .shaper_shared_n_max = 0,
568 .sched_n_children_max = UINT32_MAX,
569 .sched_sp_n_priorities_max = 1,
570 .sched_wfq_n_children_per_group_max = UINT32_MAX,
571 .sched_wfq_n_groups_max = 1,
572 .sched_wfq_weight_max = 1,
574 .stats_mask = STATS_MASK_DEFAULT,
578 [TM_NODE_LEVEL_SUBPORT] = {
579 .n_nodes_max = UINT32_MAX,
580 .n_nodes_nonleaf_max = UINT32_MAX,
581 .n_nodes_leaf_max = 0,
582 .non_leaf_nodes_identical = 1,
583 .leaf_nodes_identical = 0,
586 .shaper_private_supported = 1,
587 .shaper_private_dual_rate_supported = 0,
588 .shaper_private_rate_min = 1,
589 .shaper_private_rate_max = UINT32_MAX,
590 .shaper_shared_n_max = 0,
592 .sched_n_children_max = UINT32_MAX,
593 .sched_sp_n_priorities_max = 1,
594 .sched_wfq_n_children_per_group_max = UINT32_MAX,
595 .sched_wfq_n_groups_max = 1,
596 #ifdef RTE_SCHED_SUBPORT_TC_OV
597 .sched_wfq_weight_max = UINT32_MAX,
599 .sched_wfq_weight_max = 1,
601 .stats_mask = STATS_MASK_DEFAULT,
605 [TM_NODE_LEVEL_PIPE] = {
606 .n_nodes_max = UINT32_MAX,
607 .n_nodes_nonleaf_max = UINT32_MAX,
608 .n_nodes_leaf_max = 0,
609 .non_leaf_nodes_identical = 1,
610 .leaf_nodes_identical = 0,
613 .shaper_private_supported = 1,
614 .shaper_private_dual_rate_supported = 0,
615 .shaper_private_rate_min = 1,
616 .shaper_private_rate_max = UINT32_MAX,
617 .shaper_shared_n_max = 0,
619 .sched_n_children_max =
620 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
621 .sched_sp_n_priorities_max =
622 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
623 .sched_wfq_n_children_per_group_max = 1,
624 .sched_wfq_n_groups_max = 0,
625 .sched_wfq_weight_max = 1,
627 .stats_mask = STATS_MASK_DEFAULT,
631 [TM_NODE_LEVEL_TC] = {
632 .n_nodes_max = UINT32_MAX,
633 .n_nodes_nonleaf_max = UINT32_MAX,
634 .n_nodes_leaf_max = 0,
635 .non_leaf_nodes_identical = 1,
636 .leaf_nodes_identical = 0,
639 .shaper_private_supported = 1,
640 .shaper_private_dual_rate_supported = 0,
641 .shaper_private_rate_min = 1,
642 .shaper_private_rate_max = UINT32_MAX,
643 .shaper_shared_n_max = 1,
645 .sched_n_children_max =
646 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
647 .sched_sp_n_priorities_max = 1,
648 .sched_wfq_n_children_per_group_max =
649 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
650 .sched_wfq_n_groups_max = 1,
651 .sched_wfq_weight_max = UINT32_MAX,
653 .stats_mask = STATS_MASK_DEFAULT,
657 [TM_NODE_LEVEL_QUEUE] = {
658 .n_nodes_max = UINT32_MAX,
659 .n_nodes_nonleaf_max = 0,
660 .n_nodes_leaf_max = UINT32_MAX,
661 .non_leaf_nodes_identical = 0,
662 .leaf_nodes_identical = 1,
665 .shaper_private_supported = 0,
666 .shaper_private_dual_rate_supported = 0,
667 .shaper_private_rate_min = 0,
668 .shaper_private_rate_max = 0,
669 .shaper_shared_n_max = 0,
671 .cman_head_drop_supported = 0,
672 .cman_wred_packet_mode_supported = WRED_SUPPORTED,
673 .cman_wred_byte_mode_supported = 0,
674 .cman_wred_context_private_supported = WRED_SUPPORTED,
675 .cman_wred_context_shared_n_max = 0,
677 .stats_mask = STATS_MASK_QUEUE,
682 /* Traffic manager level capabilities get */
684 pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
686 struct rte_tm_level_capabilities *cap,
687 struct rte_tm_error *error)
690 return -rte_tm_error_set(error,
692 RTE_TM_ERROR_TYPE_CAPABILITIES,
694 rte_strerror(EINVAL));
696 if (level_id >= TM_NODE_LEVEL_MAX)
697 return -rte_tm_error_set(error,
699 RTE_TM_ERROR_TYPE_LEVEL_ID,
701 rte_strerror(EINVAL));
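	/* Copy the static per-level capabilities and adjust the node counts to
	 * the configured hierarchy size.
	 */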
703 memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));
706 case TM_NODE_LEVEL_PORT:
707 cap->nonleaf.sched_n_children_max =
708 tm_level_get_max_nodes(dev,
709 TM_NODE_LEVEL_SUBPORT);
710 cap->nonleaf.sched_wfq_n_children_per_group_max =
711 cap->nonleaf.sched_n_children_max;
714 case TM_NODE_LEVEL_SUBPORT:
715 cap->n_nodes_max = tm_level_get_max_nodes(dev,
716 TM_NODE_LEVEL_SUBPORT);
717 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
718 cap->nonleaf.sched_n_children_max =
719 tm_level_get_max_nodes(dev,
721 cap->nonleaf.sched_wfq_n_children_per_group_max =
722 cap->nonleaf.sched_n_children_max;
725 case TM_NODE_LEVEL_PIPE:
726 cap->n_nodes_max = tm_level_get_max_nodes(dev,
728 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
731 case TM_NODE_LEVEL_TC:
732 cap->n_nodes_max = tm_level_get_max_nodes(dev,
734 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
737 case TM_NODE_LEVEL_QUEUE:
739 cap->n_nodes_max = tm_level_get_max_nodes(dev,
740 TM_NODE_LEVEL_QUEUE);
741 cap->n_nodes_leaf_max = cap->n_nodes_max;
748 static const struct rte_tm_node_capabilities tm_node_cap[] = {
749 [TM_NODE_LEVEL_PORT] = {
750 .shaper_private_supported = 1,
751 .shaper_private_dual_rate_supported = 0,
752 .shaper_private_rate_min = 1,
753 .shaper_private_rate_max = UINT32_MAX,
754 .shaper_shared_n_max = 0,
757 .sched_n_children_max = UINT32_MAX,
758 .sched_sp_n_priorities_max = 1,
759 .sched_wfq_n_children_per_group_max = UINT32_MAX,
760 .sched_wfq_n_groups_max = 1,
761 .sched_wfq_weight_max = 1,
764 .stats_mask = STATS_MASK_DEFAULT,
767 [TM_NODE_LEVEL_SUBPORT] = {
768 .shaper_private_supported = 1,
769 .shaper_private_dual_rate_supported = 0,
770 .shaper_private_rate_min = 1,
771 .shaper_private_rate_max = UINT32_MAX,
772 .shaper_shared_n_max = 0,
775 .sched_n_children_max = UINT32_MAX,
776 .sched_sp_n_priorities_max = 1,
777 .sched_wfq_n_children_per_group_max = UINT32_MAX,
778 .sched_wfq_n_groups_max = 1,
779 .sched_wfq_weight_max = UINT32_MAX,
782 .stats_mask = STATS_MASK_DEFAULT,
785 [TM_NODE_LEVEL_PIPE] = {
786 .shaper_private_supported = 1,
787 .shaper_private_dual_rate_supported = 0,
788 .shaper_private_rate_min = 1,
789 .shaper_private_rate_max = UINT32_MAX,
790 .shaper_shared_n_max = 0,
793 .sched_n_children_max =
794 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
795 .sched_sp_n_priorities_max =
796 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
797 .sched_wfq_n_children_per_group_max = 1,
798 .sched_wfq_n_groups_max = 0,
799 .sched_wfq_weight_max = 1,
802 .stats_mask = STATS_MASK_DEFAULT,
805 [TM_NODE_LEVEL_TC] = {
806 .shaper_private_supported = 1,
807 .shaper_private_dual_rate_supported = 0,
808 .shaper_private_rate_min = 1,
809 .shaper_private_rate_max = UINT32_MAX,
810 .shaper_shared_n_max = 1,
813 .sched_n_children_max =
814 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
815 .sched_sp_n_priorities_max = 1,
816 .sched_wfq_n_children_per_group_max =
817 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
818 .sched_wfq_n_groups_max = 1,
819 .sched_wfq_weight_max = UINT32_MAX,
822 .stats_mask = STATS_MASK_DEFAULT,
825 [TM_NODE_LEVEL_QUEUE] = {
826 .shaper_private_supported = 0,
827 .shaper_private_dual_rate_supported = 0,
828 .shaper_private_rate_min = 0,
829 .shaper_private_rate_max = 0,
830 .shaper_shared_n_max = 0,
834 .cman_head_drop_supported = 0,
835 .cman_wred_packet_mode_supported = WRED_SUPPORTED,
836 .cman_wred_byte_mode_supported = 0,
837 .cman_wred_context_private_supported = WRED_SUPPORTED,
838 .cman_wred_context_shared_n_max = 0,
841 .stats_mask = STATS_MASK_QUEUE,
845 /* Traffic manager node capabilities get */
847 pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
849 struct rte_tm_node_capabilities *cap,
850 struct rte_tm_error *error)
852 struct tm_node *tm_node;
855 return -rte_tm_error_set(error,
857 RTE_TM_ERROR_TYPE_CAPABILITIES,
859 rte_strerror(EINVAL));
861 tm_node = tm_node_search(dev, node_id);
863 return -rte_tm_error_set(error,
865 RTE_TM_ERROR_TYPE_NODE_ID,
867 rte_strerror(EINVAL));
869 memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));
871 switch (tm_node->level) {
872 case TM_NODE_LEVEL_PORT:
873 cap->nonleaf.sched_n_children_max =
874 tm_level_get_max_nodes(dev,
875 TM_NODE_LEVEL_SUBPORT);
876 cap->nonleaf.sched_wfq_n_children_per_group_max =
877 cap->nonleaf.sched_n_children_max;
880 case TM_NODE_LEVEL_SUBPORT:
881 cap->nonleaf.sched_n_children_max =
882 tm_level_get_max_nodes(dev,
884 cap->nonleaf.sched_wfq_n_children_per_group_max =
885 cap->nonleaf.sched_n_children_max;
888 case TM_NODE_LEVEL_PIPE:
889 case TM_NODE_LEVEL_TC:
890 case TM_NODE_LEVEL_QUEUE:
899 shaper_profile_check(struct rte_eth_dev *dev,
900 uint32_t shaper_profile_id,
901 struct rte_tm_shaper_params *profile,
902 struct rte_tm_error *error)
904 struct tm_shaper_profile *sp;
906 /* Shaper profile ID must not be NONE. */
907 if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
908 return -rte_tm_error_set(error,
910 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
912 rte_strerror(EINVAL));
914 /* Shaper profile must not exist. */
915 sp = tm_shaper_profile_search(dev, shaper_profile_id);
917 return -rte_tm_error_set(error,
919 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
921 rte_strerror(EEXIST));
923 /* Profile must not be NULL. */
925 return -rte_tm_error_set(error,
927 RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
929 rte_strerror(EINVAL));
931 /* Peak rate: non-zero, 32-bit */
932 if (profile->peak.rate == 0 ||
933 profile->peak.rate >= UINT32_MAX)
934 return -rte_tm_error_set(error,
936 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
938 rte_strerror(EINVAL));
940 /* Peak size: non-zero, 32-bit */
941 if (profile->peak.size == 0 ||
942 profile->peak.size >= UINT32_MAX)
943 return -rte_tm_error_set(error,
945 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
947 rte_strerror(EINVAL));
949 /* Dual-rate profiles are not supported. */
950 if (profile->committed.rate != 0)
951 return -rte_tm_error_set(error,
953 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
955 rte_strerror(EINVAL));
957 /* Packet length adjust: 24 bytes (Ethernet framing overhead, FCS included) */
958 if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
959 return -rte_tm_error_set(error,
961 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
963 rte_strerror(EINVAL));
968 /* Traffic manager shaper profile add */
970 pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
971 uint32_t shaper_profile_id,
972 struct rte_tm_shaper_params *profile,
973 struct rte_tm_error *error)
975 struct pmd_internals *p = dev->data->dev_private;
976 struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
977 struct tm_shaper_profile *sp;
980 /* Check input params */
981 status = shaper_profile_check(dev, shaper_profile_id, profile, error);
985 /* Memory allocation */
986 sp = calloc(1, sizeof(struct tm_shaper_profile));
988 return -rte_tm_error_set(error,
990 RTE_TM_ERROR_TYPE_UNSPECIFIED,
992 rte_strerror(ENOMEM));
995 sp->shaper_profile_id = shaper_profile_id;
996 memcpy(&sp->params, profile, sizeof(sp->params));
999 TAILQ_INSERT_TAIL(spl, sp, node);
1000 p->soft.tm.h.n_shaper_profiles++;
1005 /* Traffic manager shaper profile delete */
1007 pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
1008 uint32_t shaper_profile_id,
1009 struct rte_tm_error *error)
1011 struct pmd_internals *p = dev->data->dev_private;
1012 struct tm_shaper_profile *sp;
1014 /* Check existing */
1015 sp = tm_shaper_profile_search(dev, shaper_profile_id);
1017 return -rte_tm_error_set(error,
1019 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1021 rte_strerror(EINVAL));
1025 return -rte_tm_error_set(error,
1027 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1029 rte_strerror(EBUSY));
1031 /* Remove from list */
1032 TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
1033 p->soft.tm.h.n_shaper_profiles--;
1039 static struct tm_node *
1040 tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
1041 struct tm_shared_shaper *ss)
1043 struct pmd_internals *p = dev->data->dev_private;
1044 struct tm_node_list *nl = &p->soft.tm.h.nodes;
1047 /* Find a TC node using this shared shaper (all its users are TCs of the same priority within one subport) */
1048 TAILQ_FOREACH(n, nl, node) {
1049 if (n->level != TM_NODE_LEVEL_TC ||
1050 n->params.n_shared_shapers == 0 ||
1051 n->params.shared_shaper_id[0] != ss->shared_shaper_id)
1061 update_subport_tc_rate(struct rte_eth_dev *dev,
1063 struct tm_shared_shaper *ss,
1064 struct tm_shaper_profile *sp_new)
1066 struct pmd_internals *p = dev->data->dev_private;
1067 uint32_t tc_id = tm_node_tc_id(dev, nt);
1069 struct tm_node *np = nt->parent_node;
1071 struct tm_node *ns = np->parent_node;
1072 uint32_t subport_id = tm_node_subport_id(dev, ns);
1074 struct rte_sched_subport_params subport_params;
1076 struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
1077 ss->shaper_profile_id);
1079 /* Derive new subport configuration. */
1080 memcpy(&subport_params,
1081 &p->soft.tm.params.subport_params[subport_id],
1082 sizeof(subport_params));
1083 subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;
1085 /* Update the subport configuration. */
1086 if (rte_sched_subport_config(p->soft.tm.sched,
1087 subport_id, &subport_params))
1090 /* Commit changes. */
1093 ss->shaper_profile_id = sp_new->shaper_profile_id;
1096 memcpy(&p->soft.tm.params.subport_params[subport_id],
1098 sizeof(subport_params));
1103 /* Traffic manager shared shaper add/update */
1105 pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
1106 uint32_t shared_shaper_id,
1107 uint32_t shaper_profile_id,
1108 struct rte_tm_error *error)
1110 struct pmd_internals *p = dev->data->dev_private;
1111 struct tm_shared_shaper *ss;
1112 struct tm_shaper_profile *sp;
1115 /* Shaper profile must be valid. */
1116 sp = tm_shaper_profile_search(dev, shaper_profile_id);
1118 return -rte_tm_error_set(error,
1120 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1122 rte_strerror(EINVAL));
1125 * Add new shared shaper
1127 ss = tm_shared_shaper_search(dev, shared_shaper_id);
1129 struct tm_shared_shaper_list *ssl =
1130 &p->soft.tm.h.shared_shapers;
1132 /* Hierarchy must not be frozen */
1133 if (p->soft.tm.hierarchy_frozen)
1134 return -rte_tm_error_set(error,
1136 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1138 rte_strerror(EBUSY));
1140 /* Memory allocation */
1141 ss = calloc(1, sizeof(struct tm_shared_shaper));
1143 return -rte_tm_error_set(error,
1145 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1147 rte_strerror(ENOMEM));
1150 ss->shared_shaper_id = shared_shaper_id;
1151 ss->shaper_profile_id = shaper_profile_id;
1154 TAILQ_INSERT_TAIL(ssl, ss, node);
1155 p->soft.tm.h.n_shared_shapers++;
1161 * Update existing shared shaper
1163 /* Hierarchy must be frozen (run-time update) */
1164 if (p->soft.tm.hierarchy_frozen == 0)
1165 return -rte_tm_error_set(error,
1167 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1169 rte_strerror(EBUSY));
1172 /* Propagate change. */
1173 nt = tm_shared_shaper_get_tc(dev, ss);
1174 if (update_subport_tc_rate(dev, nt, ss, sp))
1175 return -rte_tm_error_set(error,
1177 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1179 rte_strerror(EINVAL));
1184 /* Traffic manager shared shaper delete */
1186 pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
1187 uint32_t shared_shaper_id,
1188 struct rte_tm_error *error)
1190 struct pmd_internals *p = dev->data->dev_private;
1191 struct tm_shared_shaper *ss;
1193 /* Check existing */
1194 ss = tm_shared_shaper_search(dev, shared_shaper_id);
1196 return -rte_tm_error_set(error,
1198 RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
1200 rte_strerror(EINVAL));
1204 return -rte_tm_error_set(error,
1206 RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
1208 rte_strerror(EBUSY));
1210 /* Remove from list */
1211 TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
1212 p->soft.tm.h.n_shared_shapers--;
1219 wred_profile_check(struct rte_eth_dev *dev,
1220 uint32_t wred_profile_id,
1221 struct rte_tm_wred_params *profile,
1222 struct rte_tm_error *error)
1224 struct tm_wred_profile *wp;
1225 enum rte_tm_color color;
1227 /* WRED profile ID must not be NONE. */
1228 if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
1229 return -rte_tm_error_set(error,
1231 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1233 rte_strerror(EINVAL));
1235 /* WRED profile must not exist. */
1236 wp = tm_wred_profile_search(dev, wred_profile_id);
1238 return -rte_tm_error_set(error,
1240 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1242 rte_strerror(EEXIST));
1244 /* Profile must not be NULL. */
1245 if (profile == NULL)
1246 return -rte_tm_error_set(error,
1248 RTE_TM_ERROR_TYPE_WRED_PROFILE,
1250 rte_strerror(EINVAL));
1252 /* WRED profile must be in packet mode */
1253 if (profile->packet_mode == 0)
1254 return -rte_tm_error_set(error,
1256 RTE_TM_ERROR_TYPE_WRED_PROFILE,
1258 rte_strerror(ENOTSUP));
1260 /* min_th <= max_th, max_th > 0 */
1261 for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
1262 uint32_t min_th = profile->red_params[color].min_th;
1263 uint32_t max_th = profile->red_params[color].max_th;
1265 if (min_th > max_th ||
1267 min_th > UINT16_MAX ||
1268 max_th > UINT16_MAX)
1269 return -rte_tm_error_set(error,
1271 RTE_TM_ERROR_TYPE_WRED_PROFILE,
1273 rte_strerror(EINVAL));
1279 /* Traffic manager WRED profile add */
1281 pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
1282 uint32_t wred_profile_id,
1283 struct rte_tm_wred_params *profile,
1284 struct rte_tm_error *error)
1286 struct pmd_internals *p = dev->data->dev_private;
1287 struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
1288 struct tm_wred_profile *wp;
1291 /* Check input params */
1292 status = wred_profile_check(dev, wred_profile_id, profile, error);
1296 /* Memory allocation */
1297 wp = calloc(1, sizeof(struct tm_wred_profile));
1299 return -rte_tm_error_set(error,
1301 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1303 rte_strerror(ENOMEM));
1306 wp->wred_profile_id = wred_profile_id;
1307 memcpy(&wp->params, profile, sizeof(wp->params));
1310 TAILQ_INSERT_TAIL(wpl, wp, node);
1311 p->soft.tm.h.n_wred_profiles++;
1316 /* Traffic manager WRED profile delete */
1318 pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
1319 uint32_t wred_profile_id,
1320 struct rte_tm_error *error)
1322 struct pmd_internals *p = dev->data->dev_private;
1323 struct tm_wred_profile *wp;
1325 /* Check existing */
1326 wp = tm_wred_profile_search(dev, wred_profile_id);
1328 return -rte_tm_error_set(error,
1330 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1332 rte_strerror(EINVAL));
1336 return -rte_tm_error_set(error,
1338 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1340 rte_strerror(EBUSY));
1342 /* Remove from list */
1343 TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
1344 p->soft.tm.h.n_wred_profiles--;
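/* Node add checks, one helper per hierarchy level (port, subport, pipe,
 * traffic class, queue).
 */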
1351 node_add_check_port(struct rte_eth_dev *dev,
1353 uint32_t parent_node_id __rte_unused,
1356 uint32_t level_id __rte_unused,
1357 struct rte_tm_node_params *params,
1358 struct rte_tm_error *error)
1360 struct pmd_internals *p = dev->data->dev_private;
1361 struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
1362 params->shaper_profile_id);
1364 /* node type: non-leaf */
1365 if (node_id < p->params.soft.tm.nb_queues)
1366 return -rte_tm_error_set(error,
1368 RTE_TM_ERROR_TYPE_NODE_ID,
1370 rte_strerror(EINVAL));
1372 /* Priority must be 0 */
1374 return -rte_tm_error_set(error,
1376 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1378 rte_strerror(EINVAL));
1380 /* Weight must be 1 */
1382 return -rte_tm_error_set(error,
1384 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1386 rte_strerror(EINVAL));
1388 /* Shaper must be valid.
1389 * Shaper profile peak rate must fit the configured port rate.
1391 if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1393 sp->params.peak.rate > p->params.soft.tm.rate)
1394 return -rte_tm_error_set(error,
1396 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1398 rte_strerror(EINVAL));
1400 /* No shared shapers */
1401 if (params->n_shared_shapers != 0)
1402 return -rte_tm_error_set(error,
1404 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1406 rte_strerror(EINVAL));
1408 /* Number of SP priorities must be 1 */
1409 if (params->nonleaf.n_sp_priorities != 1)
1410 return -rte_tm_error_set(error,
1412 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1414 rte_strerror(EINVAL));
1417 if (params->stats_mask & ~STATS_MASK_DEFAULT)
1418 return -rte_tm_error_set(error,
1420 RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1422 rte_strerror(EINVAL));
1428 node_add_check_subport(struct rte_eth_dev *dev,
1430 uint32_t parent_node_id __rte_unused,
1433 uint32_t level_id __rte_unused,
1434 struct rte_tm_node_params *params,
1435 struct rte_tm_error *error)
1437 struct pmd_internals *p = dev->data->dev_private;
1439 /* node type: non-leaf */
1440 if (node_id < p->params.soft.tm.nb_queues)
1441 return -rte_tm_error_set(error,
1443 RTE_TM_ERROR_TYPE_NODE_ID,
1445 rte_strerror(EINVAL));
1447 /* Priority must be 0 */
1449 return -rte_tm_error_set(error,
1451 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1453 rte_strerror(EINVAL));
1455 /* Weight must be 1 */
1457 return -rte_tm_error_set(error,
1459 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1461 rte_strerror(EINVAL));
1463 /* Shaper must be valid */
1464 if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1465 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1466 return -rte_tm_error_set(error,
1468 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1470 rte_strerror(EINVAL));
1472 /* No shared shapers */
1473 if (params->n_shared_shapers != 0)
1474 return -rte_tm_error_set(error,
1476 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1478 rte_strerror(EINVAL));
1480 /* Number of SP priorities must be 1 */
1481 if (params->nonleaf.n_sp_priorities != 1)
1482 return -rte_tm_error_set(error,
1484 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1486 rte_strerror(EINVAL));
1489 if (params->stats_mask & ~STATS_MASK_DEFAULT)
1490 return -rte_tm_error_set(error,
1492 RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1494 rte_strerror(EINVAL));
1500 node_add_check_pipe(struct rte_eth_dev *dev,
1502 uint32_t parent_node_id __rte_unused,
1504 uint32_t weight __rte_unused,
1505 uint32_t level_id __rte_unused,
1506 struct rte_tm_node_params *params,
1507 struct rte_tm_error *error)
1509 struct pmd_internals *p = dev->data->dev_private;
1511 /* node type: non-leaf */
1512 if (node_id < p->params.soft.tm.nb_queues)
1513 return -rte_tm_error_set(error,
1515 RTE_TM_ERROR_TYPE_NODE_ID,
1517 rte_strerror(EINVAL));
1519 /* Priority must be 0 */
1521 return -rte_tm_error_set(error,
1523 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1525 rte_strerror(EINVAL));
1527 /* Shaper must be valid */
1528 if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1529 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1530 return -rte_tm_error_set(error,
1532 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1534 rte_strerror(EINVAL));
1536 /* No shared shapers */
1537 if (params->n_shared_shapers != 0)
1538 return -rte_tm_error_set(error,
1540 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1542 rte_strerror(EINVAL));
1544 /* Number of SP priorities must be 4 */
1545 if (params->nonleaf.n_sp_priorities !=
1546 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1547 return -rte_tm_error_set(error,
1549 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1551 rte_strerror(EINVAL));
1553 /* WFQ mode must be byte mode */
1554 if (params->nonleaf.wfq_weight_mode != NULL &&
1555 params->nonleaf.wfq_weight_mode[0] != 0 &&
1556 params->nonleaf.wfq_weight_mode[1] != 0 &&
1557 params->nonleaf.wfq_weight_mode[2] != 0 &&
1558 params->nonleaf.wfq_weight_mode[3] != 0)
1559 return -rte_tm_error_set(error,
1561 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
1563 rte_strerror(EINVAL));
1566 if (params->stats_mask & ~STATS_MASK_DEFAULT)
1567 return -rte_tm_error_set(error,
1569 RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1571 rte_strerror(EINVAL));
1577 node_add_check_tc(struct rte_eth_dev *dev,
1579 uint32_t parent_node_id __rte_unused,
1580 uint32_t priority __rte_unused,
1582 uint32_t level_id __rte_unused,
1583 struct rte_tm_node_params *params,
1584 struct rte_tm_error *error)
1586 struct pmd_internals *p = dev->data->dev_private;
1588 /* node type: non-leaf */
1589 if (node_id < p->params.soft.tm.nb_queues)
1590 return -rte_tm_error_set(error,
1592 RTE_TM_ERROR_TYPE_NODE_ID,
1594 rte_strerror(EINVAL));
1596 /* Weight must be 1 */
1598 return -rte_tm_error_set(error,
1600 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1602 rte_strerror(EINVAL));
1604 /* Shaper must be valid */
1605 if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1606 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1607 return -rte_tm_error_set(error,
1609 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1611 rte_strerror(EINVAL));
1613 /* Single valid shared shaper */
1614 if (params->n_shared_shapers > 1)
1615 return -rte_tm_error_set(error,
1617 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1619 rte_strerror(EINVAL));
1621 if (params->n_shared_shapers == 1 &&
1622 (params->shared_shaper_id == NULL ||
1623 (!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
1624 return -rte_tm_error_set(error,
1626 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
1628 rte_strerror(EINVAL));
1630 /* Number of SP priorities must be 1 */
1631 if (params->nonleaf.n_sp_priorities != 1)
1632 return -rte_tm_error_set(error,
1634 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1636 rte_strerror(EINVAL));
1639 if (params->stats_mask & ~STATS_MASK_DEFAULT)
1640 return -rte_tm_error_set(error,
1642 RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1644 rte_strerror(EINVAL));
1650 node_add_check_queue(struct rte_eth_dev *dev,
1652 uint32_t parent_node_id __rte_unused,
1654 uint32_t weight __rte_unused,
1655 uint32_t level_id __rte_unused,
1656 struct rte_tm_node_params *params,
1657 struct rte_tm_error *error)
1659 struct pmd_internals *p = dev->data->dev_private;
1661 /* node type: leaf */
1662 if (node_id >= p->params.soft.tm.nb_queues)
1663 return -rte_tm_error_set(error,
1665 RTE_TM_ERROR_TYPE_NODE_ID,
1667 rte_strerror(EINVAL));
1669 /* Priority must be 0 */
1671 return -rte_tm_error_set(error,
1673 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1675 rte_strerror(EINVAL));
1678 if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1679 return -rte_tm_error_set(error,
1681 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1683 rte_strerror(EINVAL));
1685 /* No shared shapers */
1686 if (params->n_shared_shapers != 0)
1687 return -rte_tm_error_set(error,
1689 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1691 rte_strerror(EINVAL));
1693 /* Congestion management must not be head drop */
1694 if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
1695 return -rte_tm_error_set(error,
1697 RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
1699 rte_strerror(EINVAL));
1701 /* Congestion management set to WRED */
1702 if (params->leaf.cman == RTE_TM_CMAN_WRED) {
1703 uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
1704 struct tm_wred_profile *wp = tm_wred_profile_search(dev,
1707 /* WRED profile (for private WRED context) must be valid */
1708 if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
1710 return -rte_tm_error_set(error,
1712 RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
1714 rte_strerror(EINVAL));
1716 /* No shared WRED contexts */
1717 if (params->leaf.wred.n_shared_wred_contexts != 0)
1718 return -rte_tm_error_set(error,
1720 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
1722 rte_strerror(EINVAL));
1726 if (params->stats_mask & ~STATS_MASK_QUEUE)
1727 return -rte_tm_error_set(error,
1729 RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1731 rte_strerror(EINVAL));
1737 node_add_check(struct rte_eth_dev *dev,
1739 uint32_t parent_node_id,
1743 struct rte_tm_node_params *params,
1744 struct rte_tm_error *error)
1750 /* node_id, parent_node_id:
1751 * -node_id must not be RTE_TM_NODE_ID_NULL
1752 * -node_id must not be in use
1753 * -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1754 * -root node must not exist
1755 * -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1756 * -parent_node_id must be valid
1758 if (node_id == RTE_TM_NODE_ID_NULL)
1759 return -rte_tm_error_set(error,
1761 RTE_TM_ERROR_TYPE_NODE_ID,
1763 rte_strerror(EINVAL));
1765 if (tm_node_search(dev, node_id))
1766 return -rte_tm_error_set(error,
1768 RTE_TM_ERROR_TYPE_NODE_ID,
1770 rte_strerror(EEXIST));
1772 if (parent_node_id == RTE_TM_NODE_ID_NULL) {
1774 if (tm_root_node_present(dev))
1775 return -rte_tm_error_set(error,
1777 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1779 rte_strerror(EEXIST));
1781 pn = tm_node_search(dev, parent_node_id);
1783 return -rte_tm_error_set(error,
1785 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1787 rte_strerror(EINVAL));
1790 /* priority: must be 0 .. 3 */
1791 if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1792 return -rte_tm_error_set(error,
1794 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1796 rte_strerror(EINVAL));
1798 /* weight: must be 1 .. 254 (zero and UINT8_MAX are rejected) */
1799 if (weight == 0 || weight >= UINT8_MAX)
1800 return -rte_tm_error_set(error,
1802 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1804 rte_strerror(EINVAL));
1806 /* level_id: if valid, then
1807 * -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1808 * -level_id must be zero
1809 * -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1810 * -level_id must be parent level ID plus one
1812 level = (pn == NULL) ? 0 : pn->level + 1;
1813 if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
1814 return -rte_tm_error_set(error,
1816 RTE_TM_ERROR_TYPE_LEVEL_ID,
1818 rte_strerror(EINVAL));
1820 /* params: must not be NULL */
1822 return -rte_tm_error_set(error,
1824 RTE_TM_ERROR_TYPE_NODE_PARAMS,
1826 rte_strerror(EINVAL));
1828 /* params: per level checks */
1830 case TM_NODE_LEVEL_PORT:
1831 status = node_add_check_port(dev, node_id,
1832 parent_node_id, priority, weight, level_id,
1838 case TM_NODE_LEVEL_SUBPORT:
1839 status = node_add_check_subport(dev, node_id,
1840 parent_node_id, priority, weight, level_id,
1846 case TM_NODE_LEVEL_PIPE:
1847 status = node_add_check_pipe(dev, node_id,
1848 parent_node_id, priority, weight, level_id,
1854 case TM_NODE_LEVEL_TC:
1855 status = node_add_check_tc(dev, node_id,
1856 parent_node_id, priority, weight, level_id,
1862 case TM_NODE_LEVEL_QUEUE:
1863 status = node_add_check_queue(dev, node_id,
1864 parent_node_id, priority, weight, level_id,
1871 return -rte_tm_error_set(error,
1873 RTE_TM_ERROR_TYPE_LEVEL_ID,
1875 rte_strerror(EINVAL));
1881 /* Traffic manager node add */
1883 pmd_tm_node_add(struct rte_eth_dev *dev,
1885 uint32_t parent_node_id,
1889 struct rte_tm_node_params *params,
1890 struct rte_tm_error *error)
1892 struct pmd_internals *p = dev->data->dev_private;
1893 struct tm_node_list *nl = &p->soft.tm.h.nodes;
1899 if (p->soft.tm.hierarchy_frozen)
1900 return -rte_tm_error_set(error,
1902 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1904 rte_strerror(EBUSY));
1906 status = node_add_check(dev, node_id, parent_node_id, priority, weight,
1907 level_id, params, error);
1911 /* Memory allocation */
1912 n = calloc(1, sizeof(struct tm_node));
1914 return -rte_tm_error_set(error,
1916 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1918 rte_strerror(ENOMEM));
1921 n->node_id = node_id;
1922 n->parent_node_id = parent_node_id;
1923 n->priority = priority;
1926 if (parent_node_id != RTE_TM_NODE_ID_NULL) {
1927 n->parent_node = tm_node_search(dev, parent_node_id);
1928 n->level = n->parent_node->level + 1;
1931 if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1932 n->shaper_profile = tm_shaper_profile_search(dev,
1933 params->shaper_profile_id);
1935 if (n->level == TM_NODE_LEVEL_QUEUE &&
1936 params->leaf.cman == RTE_TM_CMAN_WRED)
1937 n->wred_profile = tm_wred_profile_search(dev,
1938 params->leaf.wred.wred_profile_id);
1940 memcpy(&n->params, params, sizeof(n->params));
1943 TAILQ_INSERT_TAIL(nl, n, node);
1944 p->soft.tm.h.n_nodes++;
1946 /* Update dependencies */
1948 n->parent_node->n_children++;
1950 if (n->shaper_profile)
1951 n->shaper_profile->n_users++;
1953 for (i = 0; i < params->n_shared_shapers; i++) {
1954 struct tm_shared_shaper *ss;
1956 ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
1960 if (n->wred_profile)
1961 n->wred_profile->n_users++;
1963 p->soft.tm.h.n_tm_nodes[n->level]++;
1968 /* Traffic manager node delete */
1970 pmd_tm_node_delete(struct rte_eth_dev *dev,
1972 struct rte_tm_error *error)
1974 struct pmd_internals *p = dev->data->dev_private;
1978 /* Check hierarchy changes are currently allowed */
1979 if (p->soft.tm.hierarchy_frozen)
1980 return -rte_tm_error_set(error,
1982 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1984 rte_strerror(EBUSY));
1986 /* Check existing */
1987 n = tm_node_search(dev, node_id);
1989 return -rte_tm_error_set(error,
1991 RTE_TM_ERROR_TYPE_NODE_ID,
1993 rte_strerror(EINVAL));
1997 return -rte_tm_error_set(error,
1999 RTE_TM_ERROR_TYPE_NODE_ID,
2001 rte_strerror(EBUSY));
2003 /* Update dependencies */
2004 p->soft.tm.h.n_tm_nodes[n->level]--;
2006 if (n->wred_profile)
2007 n->wred_profile->n_users--;
2009 for (i = 0; i < n->params.n_shared_shapers; i++) {
2010 struct tm_shared_shaper *ss;
2012 ss = tm_shared_shaper_search(dev,
2013 n->params.shared_shaper_id[i]);
2017 if (n->shaper_profile)
2018 n->shaper_profile->n_users--;
2021 n->parent_node->n_children--;
2023 /* Remove from list */
2024 TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
2025 p->soft.tm.h.n_nodes--;
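/* Build the librte_sched pipe profile for pipe node np: token bucket from the
 * pipe's private shaper, TC rates from the child TC shapers, and WRR weights
 * from the grandchild queue nodes.
 */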
2033 pipe_profile_build(struct rte_eth_dev *dev,
2035 struct rte_sched_pipe_params *pp)
2037 struct pmd_internals *p = dev->data->dev_private;
2038 struct tm_hierarchy *h = &p->soft.tm.h;
2039 struct tm_node_list *nl = &h->nodes;
2040 struct tm_node *nt, *nq;
2042 memset(pp, 0, sizeof(*pp));
2045 pp->tb_rate = np->shaper_profile->params.peak.rate;
2046 pp->tb_size = np->shaper_profile->params.peak.size;
2048 /* Traffic Class (TC) */
2049 pp->tc_period = PIPE_TC_PERIOD;
2051 #ifdef RTE_SCHED_SUBPORT_TC_OV
2052 pp->tc_ov_weight = np->weight;
2055 TAILQ_FOREACH(nt, nl, node) {
2056 uint32_t queue_id = 0;
2058 if (nt->level != TM_NODE_LEVEL_TC ||
2059 nt->parent_node_id != np->node_id)
2062 pp->tc_rate[nt->priority] =
2063 nt->shaper_profile->params.peak.rate;
2066 TAILQ_FOREACH(nq, nl, node) {
2067 uint32_t pipe_queue_id;
2069 if (nq->level != TM_NODE_LEVEL_QUEUE ||
2070 nq->parent_node_id != nt->node_id)
2073 pipe_queue_id = nt->priority *
2074 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
2075 pp->wrr_weights[pipe_queue_id] = nq->weight;
2083 pipe_profile_free_exists(struct rte_eth_dev *dev,
2084 uint32_t *pipe_profile_id)
2086 struct pmd_internals *p = dev->data->dev_private;
2087 struct tm_params *t = &p->soft.tm.params;
2089 if (t->n_pipe_profiles < RTE_SCHED_PIPE_PROFILES_PER_PORT) {
2090 *pipe_profile_id = t->n_pipe_profiles;
2098 pipe_profile_exists(struct rte_eth_dev *dev,
2099 struct rte_sched_pipe_params *pp,
2100 uint32_t *pipe_profile_id)
2102 struct pmd_internals *p = dev->data->dev_private;
2103 struct tm_params *t = &p->soft.tm.params;
2106 for (i = 0; i < t->n_pipe_profiles; i++)
2107 if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
2108 if (pipe_profile_id)
2109 *pipe_profile_id = i;
2117 pipe_profile_install(struct rte_eth_dev *dev,
2118 struct rte_sched_pipe_params *pp,
2119 uint32_t pipe_profile_id)
2121 struct pmd_internals *p = dev->data->dev_private;
2122 struct tm_params *t = &p->soft.tm.params;
2124 memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
2125 t->n_pipe_profiles++;
2129 pipe_profile_mark(struct rte_eth_dev *dev,
2130 uint32_t subport_id,
2132 uint32_t pipe_profile_id)
2134 struct pmd_internals *p = dev->data->dev_private;
2135 struct tm_hierarchy *h = &p->soft.tm.h;
2136 struct tm_params *t = &p->soft.tm.params;
2137 uint32_t n_pipes_per_subport, pos;
2139 n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2140 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2141 pos = subport_id * n_pipes_per_subport + pipe_id;
2143 t->pipe_to_profile[pos] = pipe_profile_id;
2146 static struct rte_sched_pipe_params *
2147 pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
2149 struct pmd_internals *p = dev->data->dev_private;
2150 struct tm_hierarchy *h = &p->soft.tm.h;
2151 struct tm_params *t = &p->soft.tm.params;
2152 uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2153 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2155 uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
2156 uint32_t pipe_id = tm_node_pipe_id(dev, np);
2158 uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
2159 uint32_t pipe_profile_id = t->pipe_to_profile[pos];
2161 return &t->pipe_profiles[pipe_profile_id];
2165 pipe_profiles_generate(struct rte_eth_dev *dev)
2167 struct pmd_internals *p = dev->data->dev_private;
2168 struct tm_hierarchy *h = &p->soft.tm.h;
2169 struct tm_node_list *nl = &h->nodes;
2170 struct tm_node *ns, *np;
2171 uint32_t subport_id;
2173 /* Objective: Fill in the following fields in struct tm_params:
2180 TAILQ_FOREACH(ns, nl, node) {
2183 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2187 TAILQ_FOREACH(np, nl, node) {
2188 struct rte_sched_pipe_params pp;
2191 if (np->level != TM_NODE_LEVEL_PIPE ||
2192 np->parent_node_id != ns->node_id)
2195 pipe_profile_build(dev, np, &pp);
2197 if (!pipe_profile_exists(dev, &pp, &pos)) {
2198 if (!pipe_profile_free_exists(dev, &pos))
2201 pipe_profile_install(dev, &pp, pos);
2204 pipe_profile_mark(dev, subport_id, pipe_id, pos);
2215 static struct tm_wred_profile *
2216 tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
2218 struct pmd_internals *p = dev->data->dev_private;
2219 struct tm_hierarchy *h = &p->soft.tm.h;
2220 struct tm_node_list *nl = &h->nodes;
2223 TAILQ_FOREACH(nq, nl, node) {
2224 if (nq->level != TM_NODE_LEVEL_QUEUE ||
2225 nq->parent_node->priority != tc_id)
2228 return nq->wred_profile;
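/* When librte_sched is built with RED support, translate the per-TC WRED
 * profiles into the port-level RED parameters consumed by
 * rte_sched_port_config(); otherwise this step is a no-op.
 */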
2234 #ifdef RTE_SCHED_RED
2237 wred_profiles_set(struct rte_eth_dev *dev)
2239 struct pmd_internals *p = dev->data->dev_private;
2240 struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
2242 enum rte_tm_color color;
2244 for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
2245 for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
2246 struct rte_red_params *dst =
2247 &pp->red_params[tc_id][color];
2248 struct tm_wred_profile *src_wp =
2249 tm_tc_wred_profile_get(dev, tc_id);
2250 struct rte_tm_red_params *src =
2251 &src_wp->params.red_params[color];
2253 memcpy(dst, src, sizeof(*dst));
2259 #define wred_profiles_set(dev)
2263 static struct tm_shared_shaper *
2264 tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
2266 return (tc_node->params.n_shared_shapers) ?
2267 tm_shared_shaper_search(dev,
2268 tc_node->params.shared_shaper_id[0]) :
2272 static struct tm_shared_shaper *
2273 tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
2274 struct tm_node *subport_node,
2277 struct pmd_internals *p = dev->data->dev_private;
2278 struct tm_node_list *nl = &p->soft.tm.h.nodes;
2281 TAILQ_FOREACH(n, nl, node) {
2282 if (n->level != TM_NODE_LEVEL_TC ||
2283 n->parent_node->parent_node_id !=
2284 subport_node->node_id ||
2285 n->priority != tc_id)
2288 return tm_tc_shared_shaper_get(dev, n);
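/* hierarchy_commit_check(): validate the full hierarchy before it is committed
 * to librte_sched: root/subport/pipe/TC/queue structure, consistent shared
 * shaper usage across each subport, pipe profile count, and consistent WRED
 * usage per traffic class.
 */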
2295 hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
2297 struct pmd_internals *p = dev->data->dev_private;
2298 struct tm_hierarchy *h = &p->soft.tm.h;
2299 struct tm_node_list *nl = &h->nodes;
2300 struct tm_shared_shaper_list *ssl = &h->shared_shapers;
2301 struct tm_wred_profile_list *wpl = &h->wred_profiles;
2302 struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
2303 struct tm_shared_shaper *ss;
2305 uint32_t n_pipes_per_subport;
2307 /* Root node exists. */
2309 return -rte_tm_error_set(error,
2311 RTE_TM_ERROR_TYPE_LEVEL_ID,
2313 rte_strerror(EINVAL));
2315 /* There is at least one subport, max is not exceeded. */
2316 if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
2317 return -rte_tm_error_set(error,
2319 RTE_TM_ERROR_TYPE_LEVEL_ID,
2321 rte_strerror(EINVAL));
2323 /* There is at least one pipe. */
2324 if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
2325 return -rte_tm_error_set(error,
2327 RTE_TM_ERROR_TYPE_LEVEL_ID,
2329 rte_strerror(EINVAL));
2331 /* Number of pipes is the same for all subports. Maximum number of pipes
2332 * per subport is not exceeded.
2334 n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2335 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2337 if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
2338 return -rte_tm_error_set(error,
2340 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2342 rte_strerror(EINVAL));
2344 TAILQ_FOREACH(ns, nl, node) {
2345 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2348 if (ns->n_children != n_pipes_per_subport)
2349 return -rte_tm_error_set(error,
2351 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2353 rte_strerror(EINVAL));
2356 /* Each pipe has exactly 4 TCs, with exactly one TC for each priority */
2357 TAILQ_FOREACH(np, nl, node) {
2358 uint32_t mask = 0, mask_expected =
2359 RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
2362 if (np->level != TM_NODE_LEVEL_PIPE)
2365 if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
2366 return -rte_tm_error_set(error,
2368 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2370 rte_strerror(EINVAL));
2372 TAILQ_FOREACH(nt, nl, node) {
2373 if (nt->level != TM_NODE_LEVEL_TC ||
2374 nt->parent_node_id != np->node_id)
2377 mask |= 1 << nt->priority;
2380 if (mask != mask_expected)
2381 return -rte_tm_error_set(error,
2383 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2385 rte_strerror(EINVAL));
2388 /* Each TC has exactly 4 packet queues. */
2389 TAILQ_FOREACH(nt, nl, node) {
2390 if (nt->level != TM_NODE_LEVEL_TC)
2393 if (nt->n_children != RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
2394 return -rte_tm_error_set(error,
2396 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2398 rte_strerror(EINVAL));
2403 * -For each TC #i, all pipes in the same subport use the same
2404 * shared shaper (or no shared shaper) for their TC#i.
2405 * -Each shared shaper needs to have at least one user. All its
2406 * users have to be TC nodes with the same priority and the same
2409 TAILQ_FOREACH(ns, nl, node) {
2410 struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2413 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2416 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
2417 s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);
2419 TAILQ_FOREACH(nt, nl, node) {
2420 struct tm_shared_shaper *subport_ss, *tc_ss;
2422 if (nt->level != TM_NODE_LEVEL_TC ||
2423 nt->parent_node->parent_node_id !=
2427 subport_ss = s[nt->priority];
2428 tc_ss = tm_tc_shared_shaper_get(dev, nt);
2430 if (subport_ss == NULL && tc_ss == NULL)
2433 if ((subport_ss == NULL && tc_ss != NULL) ||
2434 (subport_ss != NULL && tc_ss == NULL) ||
2435 subport_ss->shared_shaper_id !=
2436 tc_ss->shared_shaper_id)
2437 return -rte_tm_error_set(error,
2439 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2441 rte_strerror(EINVAL));
2445 TAILQ_FOREACH(ss, ssl, node) {
2446 struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
2447 uint32_t n_users = 0;
2450 TAILQ_FOREACH(nt, nl, node) {
2451 if (nt->level != TM_NODE_LEVEL_TC ||
2452 nt->priority != nt_any->priority ||
2453 nt->parent_node->parent_node_id !=
2454 nt_any->parent_node->parent_node_id)
2460 if (ss->n_users == 0 || ss->n_users != n_users)
2461 return -rte_tm_error_set(error,
2463 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2465 rte_strerror(EINVAL));
2468 /* Not too many pipe profiles. */
2469 if (pipe_profiles_generate(dev))
2470 return -rte_tm_error_set(error,
2472 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2474 rte_strerror(EINVAL));
2477 * WRED (when used, i.e. at least one WRED profile defined):
2478 * -Each WRED profile must have at least one user.
2479 * -All leaf nodes must have their private WRED context enabled.
2480 * -For each TC #i, all leaf nodes must use the same WRED profile
2481 * for their private WRED context.
2483 if (h->n_wred_profiles) {
2484 struct tm_wred_profile *wp;
2485 struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2488 TAILQ_FOREACH(wp, wpl, node)
2489 if (wp->n_users == 0)
2490 return -rte_tm_error_set(error,
2492 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2494 rte_strerror(EINVAL));
2496 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
2497 w[id] = tm_tc_wred_profile_get(dev, id);
2500 return -rte_tm_error_set(error,
2502 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2504 rte_strerror(EINVAL));
2507 TAILQ_FOREACH(nq, nl, node) {
2510 if (nq->level != TM_NODE_LEVEL_QUEUE)
2513 id = nq->parent_node->priority;
2515 if (nq->wred_profile == NULL ||
2516 nq->wred_profile->wred_profile_id !=
2517 w[id]->wred_profile_id)
2518 return -rte_tm_error_set(error,
2520 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2522 rte_strerror(EINVAL));
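/* Translate the validated hierarchy into the rte_sched_port_params and
 * per-subport parameters that tm_start() later passes to librte_sched.
 */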
static void
hierarchy_blueprints_create(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;
	struct tm_hierarchy *h = &p->soft.tm.h;

	struct tm_node_list *nl = &h->nodes;
	struct tm_node *root = tm_root_node_present(dev), *n;

	uint32_t subport_id;

	t->port_params = (struct rte_sched_port_params) {
		.name = dev->data->name,
		.socket = dev->data->numa_node,
		.rate = root->shaper_profile->params.peak.rate,
		.mtu = dev->data->mtu,
		.frame_overhead =
			root->shaper_profile->params.pkt_length_adjust,
		.n_subports_per_port = root->n_children,
		.n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
			h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
		.qsize = {p->params.soft.tm.qsize[0],
			p->params.soft.tm.qsize[1],
			p->params.soft.tm.qsize[2],
			p->params.soft.tm.qsize[3],
		},
		.pipe_profiles = t->pipe_profiles,
		.n_pipe_profiles = t->n_pipe_profiles,
	};

	wred_profiles_set(dev);

	subport_id = 0;
	TAILQ_FOREACH(n, nl, node) {
		uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
		uint32_t i;

		if (n->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
			struct tm_shared_shaper *ss;
			struct tm_shaper_profile *sp;

			ss = tm_subport_tc_shared_shaper_get(dev, n, i);
			sp = (ss) ? tm_shaper_profile_search(dev,
				ss->shaper_profile_id) :
				n->shaper_profile;
			tc_rate[i] = sp->params.peak.rate;
		}

		t->subport_params[subport_id] =
			(struct rte_sched_subport_params) {
				.tb_rate = n->shaper_profile->params.peak.rate,
				.tb_size = n->shaper_profile->params.peak.size,

				.tc_rate = {tc_rate[0],
					tc_rate[1],
					tc_rate[2],
					tc_rate[3],
				},
				.tc_period = SUBPORT_TC_PERIOD,
		};

		subport_id++;
	}
}

/* Traffic manager hierarchy commit */
static int
pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
	int clear_on_fail,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	int status;

	/* Checks */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	status = hierarchy_commit_check(dev, error);
	if (status) {
		if (clear_on_fail) {
			tm_hierarchy_uninit(p);
			tm_hierarchy_init(p);
		}

		return status;
	}

	/* Create blueprints */
	hierarchy_blueprints_create(dev);

	/* Freeze hierarchy */
	p->soft.tm.hierarchy_frozen = 1;

	return 0;
}
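
/* Pipe weight (TC oversubscription weight) update. Only compiled in when
 * librte_sched is built with RTE_SCHED_SUBPORT_TC_OV; the weight change is
 * applied by switching the pipe to an already existing pipe profile that
 * differs only in tc_ov_weight.
 */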
#ifdef RTE_SCHED_SUBPORT_TC_OV

static int
update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tc_ov_weight = (uint8_t)weight;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	np->weight = weight;

	return 0;
}

#endif
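
/* Queue WRR weight update: derive a pipe profile that differs only in the
 * WRR weight of the target queue and switch the pipe to it, provided such a
 * profile already exists.
 */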
static int
update_queue_weight(struct rte_eth_dev *dev,
	struct tm_node *nq, uint32_t weight)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t queue_id = tm_node_queue_id(dev, nq);

	struct tm_node *nt = nq->parent_node;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	uint32_t pipe_queue_id =
		tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.wrr_weights[pipe_queue_id] = (uint8_t)weight;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	nq->weight = weight;

	return 0;
}
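
/* Note: the hierarchy topology is kept fixed once committed, so the parent
 * update callback below rejects any change of parent node or priority and
 * only honors in-place weight updates on pipe and queue nodes.
 */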
/* Traffic manager node parent update */
static int
pmd_tm_node_parent_update(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	struct rte_tm_error *error)
{
	struct tm_node *n;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Parent node must be the same */
	if (n->parent_node_id != parent_node_id)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be the same */
	if (n->priority != priority)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 .. 255 */
	if (weight == 0 || weight >= UINT8_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_SUBPORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_PIPE:
#ifdef RTE_SCHED_SUBPORT_TC_OV
		if (update_pipe_weight(dev, n, weight))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
#else
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
#endif
		/* fall-through */
	case TM_NODE_LEVEL_TC:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_QUEUE:
		/* fall-through */
	default:
		if (update_queue_weight(dev, n, weight))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	}
}
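
/* Apply a new shaper profile to a subport node by reconfiguring the
 * corresponding librte_sched subport (token bucket rate and size), then
 * update the shaper profile bookkeeping on success.
 */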
static int
update_subport_rate(struct rte_eth_dev *dev,
	struct tm_node *ns,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_subport_params subport_params;

	/* Derive new subport configuration. */
	memcpy(&subport_params,
		&p->soft.tm.params.subport_params[subport_id],
		sizeof(subport_params));
	subport_params.tb_rate = sp->params.peak.rate;
	subport_params.tb_size = sp->params.peak.size;

	/* Update the subport configuration. */
	if (rte_sched_subport_config(p->soft.tm.sched, subport_id,
		&subport_params))
		return -1;

	/* Commit changes. */
	ns->shaper_profile->n_users--;

	ns->shaper_profile = sp;
	ns->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	memcpy(&p->soft.tm.params.subport_params[subport_id],
		&subport_params,
		sizeof(subport_params));

	return 0;
}
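
/* Apply a new shaper profile to a pipe node by switching the pipe to an
 * existing pipe profile that matches the new token bucket rate and size.
 */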
static int
update_pipe_rate(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tb_rate = sp->params.peak.rate;
	profile1.tb_size = sp->params.peak.size;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	np->shaper_profile->n_users--;
	np->shaper_profile = sp;
	np->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	return 0;
}
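
/* Apply a new shaper profile to a traffic class node by switching its pipe
 * to an existing pipe profile that matches the new TC rate.
 */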
static int
update_tc_rate(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tc_rate[tc_id] = sp->params.peak.rate;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	nt->shaper_profile->n_users--;
	nt->shaper_profile = sp;
	nt->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	return 0;
}

/* Traffic manager node shaper update */
static int
pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct tm_node *n;
	struct tm_shaper_profile *sp;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper profile must be valid. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_SUBPORT:
		if (update_subport_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_PIPE:
		if (update_pipe_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_TC:
		if (update_tc_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_QUEUE:
		/* fall-through */
	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));
	}
}
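
/* Flatten the (subport, pipe, tc, queue) coordinates into the port-level
 * queue index used by librte_sched:
 *
 *   qid = ((subport * n_pipes_per_subport + pipe) *
 *           RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + tc) *
 *           RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue
 */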
static inline uint32_t
tm_port_queue_id(struct rte_eth_dev *dev,
	uint32_t port_subport_id,
	uint32_t subport_pipe_id,
	uint32_t pipe_tc_id,
	uint32_t tc_queue_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

	uint32_t port_pipe_id =
		port_subport_id * n_pipes_per_subport + subport_pipe_id;
	uint32_t port_tc_id =
		port_pipe_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + pipe_tc_id;
	uint32_t port_queue_id =
		port_tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_queue_id;

	return port_queue_id;
}
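
/* Port-level stats: sum the per-TC counters of every subport into the root
 * node counters, then optionally copy and/or clear them.
 */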
static int
read_port_stats(struct rte_eth_dev *dev,
	struct tm_node *nr,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
	uint32_t subport_id;

	for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
		struct rte_sched_subport_stats s;
		uint32_t tc_ov, id;

		/* Stats read */
		int status = rte_sched_subport_read_stats(
			p->soft.tm.sched,
			subport_id,
			&s,
			&tc_ov);
		if (status)
			return status;

		/* Stats accumulate */
		for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
			nr->stats.n_pkts +=
				s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
			nr->stats.n_bytes +=
				s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
			nr->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
				s.n_pkts_tc_dropped[id];
			nr->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
				s.n_bytes_tc_dropped[id];
		}
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &nr->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&nr->stats, 0, sizeof(nr->stats));

	return 0;
}
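
/* Subport-level stats: accumulate the per-TC counters reported by
 * librte_sched for this subport, then optionally copy and/or clear them.
 */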
static int
read_subport_stats(struct rte_eth_dev *dev,
	struct tm_node *ns,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	struct rte_sched_subport_stats s;
	uint32_t tc_ov, tc_id;

	/* Stats read */
	int status = rte_sched_subport_read_stats(
		p->soft.tm.sched,
		subport_id,
		&s,
		&tc_ov);
	if (status)
		return status;

	/* Stats accumulate */
	for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
		ns->stats.n_pkts +=
			s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
		ns->stats.n_bytes +=
			s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
		ns->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
			s.n_pkts_tc_dropped[tc_id];
		ns->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
			s.n_bytes_tc_dropped[tc_id];
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &ns->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&ns->stats, 0, sizeof(ns->stats));

	return 0;
}
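
/* Pipe-level stats: accumulate the counters of all queues belonging to this
 * pipe, then optionally copy and/or clear them.
 */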
static int
read_pipe_stats(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;

	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	uint32_t i;

	/* Stats read */
	for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
		struct rte_sched_queue_stats s;
		uint16_t qlen;

		uint32_t qid = tm_port_queue_id(dev,
			subport_id,
			pipe_id,
			i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);

		int status = rte_sched_queue_read_stats(
			p->soft.tm.sched,
			qid,
			&s,
			&qlen);
		if (status)
			return status;

		/* Stats accumulate */
		np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
		np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
		np->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
		np->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
			s.n_bytes_dropped;
		np->stats.leaf.n_pkts_queued = qlen;
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &np->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&np->stats, 0, sizeof(np->stats));

	return 0;
}
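
/* Traffic class level stats: accumulate the counters of the queues of this
 * TC within its pipe, then optionally copy and/or clear them.
 */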
static int
read_tc_stats(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;

	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	uint32_t i;

	/* Stats read */
	for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
		struct rte_sched_queue_stats s;
		uint16_t qlen;

		uint32_t qid = tm_port_queue_id(dev,
			subport_id,
			pipe_id,
			tc_id,
			i);

		int status = rte_sched_queue_read_stats(
			p->soft.tm.sched,
			qid,
			&s,
			&qlen);
		if (status)
			return status;

		/* Stats accumulate */
		nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
		nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
		nt->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
		nt->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
			s.n_bytes_dropped;
		nt->stats.leaf.n_pkts_queued = qlen;
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &nt->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&nt->stats, 0, sizeof(nt->stats));

	return 0;
}
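
/* Queue-level stats: read the counters of a single queue, then optionally
 * copy and/or clear them.
 */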
static int
read_queue_stats(struct rte_eth_dev *dev,
	struct tm_node *nq,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct rte_sched_queue_stats s;
	uint16_t qlen;

	uint32_t queue_id = tm_node_queue_id(dev, nq);

	struct tm_node *nt = nq->parent_node;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	/* Stats read */
	uint32_t qid = tm_port_queue_id(dev,
		subport_id,
		pipe_id,
		tc_id,
		queue_id);

	int status = rte_sched_queue_read_stats(
		p->soft.tm.sched,
		qid,
		&s,
		&qlen);
	if (status)
		return status;

	/* Stats accumulate */
	nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
	nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
	nq->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
	nq->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
		s.n_bytes_dropped;
	nq->stats.leaf.n_pkts_queued = qlen;

	/* Stats copy */
	if (stats)
		memcpy(stats, &nq->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_QUEUE;

	/* Stats clear */
	if (clear)
		memset(&nq->stats, 0, sizeof(nq->stats));

	return 0;
}

/* Traffic manager read stats counters for specific node */
static int
pmd_tm_node_stats_read(struct rte_eth_dev *dev,
	uint32_t node_id,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear,
	struct rte_tm_error *error)
{
	struct tm_node *n;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		if (read_port_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_SUBPORT:
		if (read_subport_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_PIPE:
		if (read_pipe_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_TC:
		if (read_tc_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_QUEUE:
	default:
		if (read_queue_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	}
}
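
/* Traffic manager ops exposed through the ethdev TM API. Callbacks left NULL
 * (node suspend/resume, shared WRED contexts, per-node stats/CMAN/WRED
 * updates, etc.) are not supported by this driver.
 */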
const struct rte_tm_ops pmd_tm_ops = {
	.node_type_get = pmd_tm_node_type_get,
	.capabilities_get = pmd_tm_capabilities_get,
	.level_capabilities_get = pmd_tm_level_capabilities_get,
	.node_capabilities_get = pmd_tm_node_capabilities_get,

	.wred_profile_add = pmd_tm_wred_profile_add,
	.wred_profile_delete = pmd_tm_wred_profile_delete,
	.shared_wred_context_add_update = NULL,
	.shared_wred_context_delete = NULL,

	.shaper_profile_add = pmd_tm_shaper_profile_add,
	.shaper_profile_delete = pmd_tm_shaper_profile_delete,
	.shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
	.shared_shaper_delete = pmd_tm_shared_shaper_delete,

	.node_add = pmd_tm_node_add,
	.node_delete = pmd_tm_node_delete,
	.node_suspend = NULL,
	.node_resume = NULL,
	.hierarchy_commit = pmd_tm_hierarchy_commit,

	.node_parent_update = pmd_tm_node_parent_update,
	.node_shaper_update = pmd_tm_node_shaper_update,
	.node_shared_shaper_update = NULL,
	.node_stats_update = NULL,
	.node_wfq_weight_mode_update = NULL,
	.node_cman_update = NULL,
	.node_wred_context_update = NULL,
	.node_shared_wred_context_update = NULL,

	.node_stats_read = pmd_tm_node_stats_read,
};