1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
 */
9 #include <rte_malloc.h>
11 #include "rte_eth_softnic_internals.h"
12 #include "rte_eth_softnic.h"
14 #define BYTES_IN_MBPS (1000 * 1000 / 8)
15 #define SUBPORT_TC_PERIOD 10
16 #define PIPE_TC_PERIOD 40
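/*
 * Worked example for the conversion above: the hard_rate argument of
 * tm_params_check() is given in Mbps, while the soft TM rate is tracked
 * in bytes per second, e.g. for a 10 Gbps NIC:
 *
 *   10000 Mbps * BYTES_IN_MBPS = 10000 * 125000 = 1250000000 bytes/sec
 *
 * SUBPORT_TC_PERIOD and PIPE_TC_PERIOD are the traffic class rate
 * enforcement periods handed to librte_sched (assumed to be expressed
 * in milliseconds, per the rte_sched API).
 */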
19 tm_params_check(struct pmd_params *params, uint32_t hard_rate)
21 uint64_t hard_rate_bytes_per_sec = (uint64_t)hard_rate * BYTES_IN_MBPS;
25 if (params->soft.tm.rate) {
26 if (params->soft.tm.rate > hard_rate_bytes_per_sec)
29 params->soft.tm.rate =
30 (hard_rate_bytes_per_sec > UINT32_MAX) ?
31 UINT32_MAX : hard_rate_bytes_per_sec;
35 if (params->soft.tm.nb_queues == 0)
38 if (params->soft.tm.nb_queues < RTE_SCHED_QUEUES_PER_PIPE)
39 params->soft.tm.nb_queues = RTE_SCHED_QUEUES_PER_PIPE;
41 params->soft.tm.nb_queues =
42 rte_align32pow2(params->soft.tm.nb_queues);
45 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
46 if (params->soft.tm.qsize[i] == 0)
49 params->soft.tm.qsize[i] =
50 rte_align32pow2(params->soft.tm.qsize[i]);
53 /* enq_bsz, deq_bsz: both non-zero, with deq_bsz strictly smaller than enq_bsz */
54 if (params->soft.tm.enq_bsz == 0 ||
55 params->soft.tm.deq_bsz == 0 ||
56 params->soft.tm.deq_bsz >= params->soft.tm.enq_bsz)
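/*
 * Illustrative sketch of the normalization performed by tm_params_check()
 * above (the input values are hypothetical):
 *
 *   params->soft.tm.nb_queues = 40;   becomes rte_align32pow2(40) = 64
 *   params->soft.tm.qsize[i]  = 100;  becomes rte_align32pow2(100) = 128
 *
 * A nb_queues value below RTE_SCHED_QUEUES_PER_PIPE is first raised to
 * that minimum and only then rounded up to the next power of two.
 */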
63 tm_hierarchy_init(struct pmd_internals *p)
65 memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
67 /* Initialize shaper profile list */
68 TAILQ_INIT(&p->soft.tm.h.shaper_profiles);
70 /* Initialize shared shaper list */
71 TAILQ_INIT(&p->soft.tm.h.shared_shapers);
73 /* Initialize wred profile list */
74 TAILQ_INIT(&p->soft.tm.h.wred_profiles);
76 /* Initialize TM node list */
77 TAILQ_INIT(&p->soft.tm.h.nodes);
81 tm_hierarchy_uninit(struct pmd_internals *p)
85 struct tm_node *tm_node;
87 tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
91 TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
95 /* Remove all WRED profiles */
97 struct tm_wred_profile *wred_profile;
99 wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
100 if (wred_profile == NULL)
103 TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
107 /* Remove all shared shapers */
109 struct tm_shared_shaper *shared_shaper;
111 shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
112 if (shared_shaper == NULL)
115 TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
119 /* Remove all shaper profiles */
121 struct tm_shaper_profile *shaper_profile;
123 shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
124 if (shaper_profile == NULL)
127 TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
128 shaper_profile, node);
129 free(shaper_profile);
132 memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
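/*
 * The removal loops in tm_hierarchy_uninit() above all follow the same
 * TAILQ drain pattern, sketched here for the node list:
 *
 *   for ( ; ; ) {
 *       struct tm_node *tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
 *
 *       if (tm_node == NULL)
 *           break;
 *
 *       TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
 *       free(tm_node);
 *   }
 *
 * Teardown runs nodes first, then WRED profiles, shared shapers and
 * finally shaper profiles, before the final memset() resets the counters.
 */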
136 tm_init(struct pmd_internals *p,
137 struct pmd_params *params,
140 uint32_t enq_bsz = params->soft.tm.enq_bsz;
141 uint32_t deq_bsz = params->soft.tm.deq_bsz;
143 p->soft.tm.pkts_enq = rte_zmalloc_socket(params->soft.name,
144 2 * enq_bsz * sizeof(struct rte_mbuf *),
148 if (p->soft.tm.pkts_enq == NULL)
151 p->soft.tm.pkts_deq = rte_zmalloc_socket(params->soft.name,
152 deq_bsz * sizeof(struct rte_mbuf *),
156 if (p->soft.tm.pkts_deq == NULL) {
157 rte_free(p->soft.tm.pkts_enq);
161 tm_hierarchy_init(p);
167 tm_free(struct pmd_internals *p)
169 tm_hierarchy_uninit(p);
170 rte_free(p->soft.tm.pkts_enq);
171 rte_free(p->soft.tm.pkts_deq);
175 tm_start(struct pmd_internals *p)
177 struct tm_params *t = &p->soft.tm.params;
178 uint32_t n_subports, subport_id;
181 /* The hierarchy must have been frozen (committed) before the port can be started */
182 if (p->soft.tm.hierarchy_frozen == 0)
186 p->soft.tm.sched = rte_sched_port_config(&t->port_params);
187 if (p->soft.tm.sched == NULL)
191 n_subports = t->port_params.n_subports_per_port;
192 for (subport_id = 0; subport_id < n_subports; subport_id++) {
193 uint32_t n_pipes_per_subport =
194 t->port_params.n_pipes_per_subport;
197 status = rte_sched_subport_config(p->soft.tm.sched,
199 &t->subport_params[subport_id]);
201 rte_sched_port_free(p->soft.tm.sched);
206 n_pipes_per_subport = t->port_params.n_pipes_per_subport;
207 for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
208 int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
210 int profile_id = t->pipe_to_profile[pos];
215 status = rte_sched_pipe_config(p->soft.tm.sched,
220 rte_sched_port_free(p->soft.tm.sched);
230 tm_stop(struct pmd_internals *p)
232 if (p->soft.tm.sched)
233 rte_sched_port_free(p->soft.tm.sched);
235 /* Unfreeze hierarchy */
236 p->soft.tm.hierarchy_frozen = 0;
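/*
 * tm_start() above mirrors the usual librte_sched bring-up sequence
 * (sketch only, error handling omitted):
 *
 *   port = rte_sched_port_config(&t->port_params);
 *   rte_sched_subport_config(port, subport_id, &t->subport_params[subport_id]);
 *   rte_sched_pipe_config(port, subport_id, pipe_id, profile_id);
 *
 * and is only reached once the hierarchy has been frozen by
 * pmd_tm_hierarchy_commit(); tm_stop() releases the scheduler and
 * unfreezes the hierarchy so it can be rebuilt.
 */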
239 static struct tm_shaper_profile *
240 tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
242 struct pmd_internals *p = dev->data->dev_private;
243 struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
244 struct tm_shaper_profile *sp;
246 TAILQ_FOREACH(sp, spl, node)
247 if (shaper_profile_id == sp->shaper_profile_id)
253 static struct tm_shared_shaper *
254 tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
256 struct pmd_internals *p = dev->data->dev_private;
257 struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
258 struct tm_shared_shaper *ss;
260 TAILQ_FOREACH(ss, ssl, node)
261 if (shared_shaper_id == ss->shared_shaper_id)
267 static struct tm_wred_profile *
268 tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
270 struct pmd_internals *p = dev->data->dev_private;
271 struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
272 struct tm_wred_profile *wp;
274 TAILQ_FOREACH(wp, wpl, node)
275 if (wred_profile_id == wp->wred_profile_id)
281 static struct tm_node *
282 tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
284 struct pmd_internals *p = dev->data->dev_private;
285 struct tm_node_list *nl = &p->soft.tm.h.nodes;
288 TAILQ_FOREACH(n, nl, node)
289 if (n->node_id == node_id)
295 static struct tm_node *
296 tm_root_node_present(struct rte_eth_dev *dev)
298 struct pmd_internals *p = dev->data->dev_private;
299 struct tm_node_list *nl = &p->soft.tm.h.nodes;
302 TAILQ_FOREACH(n, nl, node)
303 if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
310 tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
312 struct pmd_internals *p = dev->data->dev_private;
313 struct tm_node_list *nl = &p->soft.tm.h.nodes;
318 TAILQ_FOREACH(ns, nl, node) {
319 if (ns->level != TM_NODE_LEVEL_SUBPORT)
322 if (ns->node_id == subport_node->node_id)
332 tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
334 struct pmd_internals *p = dev->data->dev_private;
335 struct tm_node_list *nl = &p->soft.tm.h.nodes;
340 TAILQ_FOREACH(np, nl, node) {
341 if (np->level != TM_NODE_LEVEL_PIPE ||
342 np->parent_node_id != pipe_node->parent_node_id)
345 if (np->node_id == pipe_node->node_id)
355 tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
357 return tc_node->priority;
361 tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
363 struct pmd_internals *p = dev->data->dev_private;
364 struct tm_node_list *nl = &p->soft.tm.h.nodes;
369 TAILQ_FOREACH(nq, nl, node) {
370 if (nq->level != TM_NODE_LEVEL_QUEUE ||
371 nq->parent_node_id != queue_node->parent_node_id)
374 if (nq->node_id == queue_node->node_id)
384 tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
386 struct pmd_internals *p = dev->data->dev_private;
387 uint32_t n_queues_max = p->params.soft.tm.nb_queues;
388 uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
389 uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
390 uint32_t n_subports_max = n_pipes_max;
391 uint32_t n_root_max = 1;
394 case TM_NODE_LEVEL_PORT:
396 case TM_NODE_LEVEL_SUBPORT:
397 return n_subports_max;
398 case TM_NODE_LEVEL_PIPE:
400 case TM_NODE_LEVEL_TC:
402 case TM_NODE_LEVEL_QUEUE:
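/*
 * Worked example for tm_level_get_max_nodes(), assuming the scheduler
 * constants of 4 queues per TC and 4 TCs per pipe and a hypothetical
 * nb_queues of 4096:
 *
 *   n_queues_max   = 4096
 *   n_tc_max       = 4096 / 4 = 1024
 *   n_pipes_max    = 1024 / 4 = 256
 *   n_subports_max = 256
 *   n_root_max     = 1
 */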
408 /* Traffic manager node type get */
410 pmd_tm_node_type_get(struct rte_eth_dev *dev,
413 struct rte_tm_error *error)
415 struct pmd_internals *p = dev->data->dev_private;
418 return -rte_tm_error_set(error,
420 RTE_TM_ERROR_TYPE_UNSPECIFIED,
422 rte_strerror(EINVAL));
424 if (node_id == RTE_TM_NODE_ID_NULL ||
425 (tm_node_search(dev, node_id) == NULL))
426 return -rte_tm_error_set(error,
428 RTE_TM_ERROR_TYPE_NODE_ID,
430 rte_strerror(EINVAL));
432 *is_leaf = node_id < p->params.soft.tm.nb_queues;
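/*
 * The leaf test above relies on the node ID convention used throughout
 * this driver: IDs in the range [0, nb_queues) are reserved for leaf
 * (queue) nodes, while non-leaf nodes must use IDs of nb_queues or
 * higher (see the node_add_check_*() helpers below).
 */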
438 #define WRED_SUPPORTED 1
440 #define WRED_SUPPORTED 0
443 #define STATS_MASK_DEFAULT \
444 (RTE_TM_STATS_N_PKTS | \
445 RTE_TM_STATS_N_BYTES | \
446 RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
447 RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
449 #define STATS_MASK_QUEUE \
450 (STATS_MASK_DEFAULT | \
451 RTE_TM_STATS_N_PKTS_QUEUED)
453 static const struct rte_tm_capabilities tm_cap = {
454 .n_nodes_max = UINT32_MAX,
455 .n_levels_max = TM_NODE_LEVEL_MAX,
457 .non_leaf_nodes_identical = 0,
458 .leaf_nodes_identical = 1,
460 .shaper_n_max = UINT32_MAX,
461 .shaper_private_n_max = UINT32_MAX,
462 .shaper_private_dual_rate_n_max = 0,
463 .shaper_private_rate_min = 1,
464 .shaper_private_rate_max = UINT32_MAX,
466 .shaper_shared_n_max = UINT32_MAX,
467 .shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
468 .shaper_shared_n_shapers_per_node_max = 1,
469 .shaper_shared_dual_rate_n_max = 0,
470 .shaper_shared_rate_min = 1,
471 .shaper_shared_rate_max = UINT32_MAX,
473 .shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
474 .shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
476 .sched_n_children_max = UINT32_MAX,
477 .sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
478 .sched_wfq_n_children_per_group_max = UINT32_MAX,
479 .sched_wfq_n_groups_max = 1,
480 .sched_wfq_weight_max = UINT32_MAX,
482 .cman_head_drop_supported = 0,
483 .cman_wred_context_n_max = 0,
484 .cman_wred_context_private_n_max = 0,
485 .cman_wred_context_shared_n_max = 0,
486 .cman_wred_context_shared_n_nodes_per_context_max = 0,
487 .cman_wred_context_shared_n_contexts_per_node_max = 0,
489 .mark_vlan_dei_supported = {0, 0, 0},
490 .mark_ip_ecn_tcp_supported = {0, 0, 0},
491 .mark_ip_ecn_sctp_supported = {0, 0, 0},
492 .mark_ip_dscp_supported = {0, 0, 0},
494 .dynamic_update_mask = 0,
496 .stats_mask = STATS_MASK_QUEUE,
499 /* Traffic manager capabilities get */
501 pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused,
502 struct rte_tm_capabilities *cap,
503 struct rte_tm_error *error)
506 return -rte_tm_error_set(error,
508 RTE_TM_ERROR_TYPE_CAPABILITIES,
510 rte_strerror(EINVAL));
512 memcpy(cap, &tm_cap, sizeof(*cap));
514 cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
515 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
516 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
517 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
518 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
520 cap->shaper_private_n_max =
521 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
522 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
523 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
524 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);
526 cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
527 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);
529 cap->shaper_n_max = cap->shaper_private_n_max +
530 cap->shaper_shared_n_max;
532 cap->shaper_shared_n_nodes_per_shaper_max =
533 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);
535 cap->sched_n_children_max = RTE_MAX(
536 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
537 (uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);
539 cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
542 cap->cman_wred_context_private_n_max =
543 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
545 cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
546 cap->cman_wred_context_shared_n_max;
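/*
 * Application-side usage sketch (port_id is assumed to be a valid
 * soft NIC port):
 *
 *   struct rte_tm_capabilities cap;
 *   struct rte_tm_error err;
 *
 *   if (rte_tm_capabilities_get(port_id, &cap, &err) == 0)
 *       printf("TM nodes supported: %u\n", cap.n_nodes_max);
 */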
551 static const struct rte_tm_level_capabilities tm_level_cap[] = {
552 [TM_NODE_LEVEL_PORT] = {
554 .n_nodes_nonleaf_max = 1,
555 .n_nodes_leaf_max = 0,
556 .non_leaf_nodes_identical = 1,
557 .leaf_nodes_identical = 0,
560 .shaper_private_supported = 1,
561 .shaper_private_dual_rate_supported = 0,
562 .shaper_private_rate_min = 1,
563 .shaper_private_rate_max = UINT32_MAX,
564 .shaper_shared_n_max = 0,
566 .sched_n_children_max = UINT32_MAX,
567 .sched_sp_n_priorities_max = 1,
568 .sched_wfq_n_children_per_group_max = UINT32_MAX,
569 .sched_wfq_n_groups_max = 1,
570 .sched_wfq_weight_max = 1,
572 .stats_mask = STATS_MASK_DEFAULT,
576 [TM_NODE_LEVEL_SUBPORT] = {
577 .n_nodes_max = UINT32_MAX,
578 .n_nodes_nonleaf_max = UINT32_MAX,
579 .n_nodes_leaf_max = 0,
580 .non_leaf_nodes_identical = 1,
581 .leaf_nodes_identical = 0,
584 .shaper_private_supported = 1,
585 .shaper_private_dual_rate_supported = 0,
586 .shaper_private_rate_min = 1,
587 .shaper_private_rate_max = UINT32_MAX,
588 .shaper_shared_n_max = 0,
590 .sched_n_children_max = UINT32_MAX,
591 .sched_sp_n_priorities_max = 1,
592 .sched_wfq_n_children_per_group_max = UINT32_MAX,
593 .sched_wfq_n_groups_max = 1,
594 #ifdef RTE_SCHED_SUBPORT_TC_OV
595 .sched_wfq_weight_max = UINT32_MAX,
597 .sched_wfq_weight_max = 1,
599 .stats_mask = STATS_MASK_DEFAULT,
603 [TM_NODE_LEVEL_PIPE] = {
604 .n_nodes_max = UINT32_MAX,
605 .n_nodes_nonleaf_max = UINT32_MAX,
606 .n_nodes_leaf_max = 0,
607 .non_leaf_nodes_identical = 1,
608 .leaf_nodes_identical = 0,
611 .shaper_private_supported = 1,
612 .shaper_private_dual_rate_supported = 0,
613 .shaper_private_rate_min = 1,
614 .shaper_private_rate_max = UINT32_MAX,
615 .shaper_shared_n_max = 0,
617 .sched_n_children_max =
618 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
619 .sched_sp_n_priorities_max =
620 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
621 .sched_wfq_n_children_per_group_max = 1,
622 .sched_wfq_n_groups_max = 0,
623 .sched_wfq_weight_max = 1,
625 .stats_mask = STATS_MASK_DEFAULT,
629 [TM_NODE_LEVEL_TC] = {
630 .n_nodes_max = UINT32_MAX,
631 .n_nodes_nonleaf_max = UINT32_MAX,
632 .n_nodes_leaf_max = 0,
633 .non_leaf_nodes_identical = 1,
634 .leaf_nodes_identical = 0,
637 .shaper_private_supported = 1,
638 .shaper_private_dual_rate_supported = 0,
639 .shaper_private_rate_min = 1,
640 .shaper_private_rate_max = UINT32_MAX,
641 .shaper_shared_n_max = 1,
643 .sched_n_children_max =
644 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
645 .sched_sp_n_priorities_max = 1,
646 .sched_wfq_n_children_per_group_max =
647 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
648 .sched_wfq_n_groups_max = 1,
649 .sched_wfq_weight_max = UINT32_MAX,
651 .stats_mask = STATS_MASK_DEFAULT,
655 [TM_NODE_LEVEL_QUEUE] = {
656 .n_nodes_max = UINT32_MAX,
657 .n_nodes_nonleaf_max = 0,
658 .n_nodes_leaf_max = UINT32_MAX,
659 .non_leaf_nodes_identical = 0,
660 .leaf_nodes_identical = 1,
663 .shaper_private_supported = 0,
664 .shaper_private_dual_rate_supported = 0,
665 .shaper_private_rate_min = 0,
666 .shaper_private_rate_max = 0,
667 .shaper_shared_n_max = 0,
669 .cman_head_drop_supported = 0,
670 .cman_wred_context_private_supported = WRED_SUPPORTED,
671 .cman_wred_context_shared_n_max = 0,
673 .stats_mask = STATS_MASK_QUEUE,
678 /* Traffic manager level capabilities get */
680 pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
682 struct rte_tm_level_capabilities *cap,
683 struct rte_tm_error *error)
686 return -rte_tm_error_set(error,
688 RTE_TM_ERROR_TYPE_CAPABILITIES,
690 rte_strerror(EINVAL));
692 if (level_id >= TM_NODE_LEVEL_MAX)
693 return -rte_tm_error_set(error,
695 RTE_TM_ERROR_TYPE_LEVEL_ID,
697 rte_strerror(EINVAL));
699 memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));
702 case TM_NODE_LEVEL_PORT:
703 cap->nonleaf.sched_n_children_max =
704 tm_level_get_max_nodes(dev,
705 TM_NODE_LEVEL_SUBPORT);
706 cap->nonleaf.sched_wfq_n_children_per_group_max =
707 cap->nonleaf.sched_n_children_max;
710 case TM_NODE_LEVEL_SUBPORT:
711 cap->n_nodes_max = tm_level_get_max_nodes(dev,
712 TM_NODE_LEVEL_SUBPORT);
713 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
714 cap->nonleaf.sched_n_children_max =
715 tm_level_get_max_nodes(dev,
717 cap->nonleaf.sched_wfq_n_children_per_group_max =
718 cap->nonleaf.sched_n_children_max;
721 case TM_NODE_LEVEL_PIPE:
722 cap->n_nodes_max = tm_level_get_max_nodes(dev,
724 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
727 case TM_NODE_LEVEL_TC:
728 cap->n_nodes_max = tm_level_get_max_nodes(dev,
730 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
733 case TM_NODE_LEVEL_QUEUE:
735 cap->n_nodes_max = tm_level_get_max_nodes(dev,
736 TM_NODE_LEVEL_QUEUE);
737 cap->n_nodes_leaf_max = cap->n_nodes_max;
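/*
 * The five TM levels used by these capability tables map directly onto
 * the librte_sched hierarchy:
 *
 *   TM_NODE_LEVEL_PORT    - scheduler port (single root node)
 *   TM_NODE_LEVEL_SUBPORT - subport
 *   TM_NODE_LEVEL_PIPE    - pipe
 *   TM_NODE_LEVEL_TC      - traffic class (strict priority within a pipe)
 *   TM_NODE_LEVEL_QUEUE   - leaf queue (WRR within its traffic class)
 */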
744 static const struct rte_tm_node_capabilities tm_node_cap[] = {
745 [TM_NODE_LEVEL_PORT] = {
746 .shaper_private_supported = 1,
747 .shaper_private_dual_rate_supported = 0,
748 .shaper_private_rate_min = 1,
749 .shaper_private_rate_max = UINT32_MAX,
750 .shaper_shared_n_max = 0,
753 .sched_n_children_max = UINT32_MAX,
754 .sched_sp_n_priorities_max = 1,
755 .sched_wfq_n_children_per_group_max = UINT32_MAX,
756 .sched_wfq_n_groups_max = 1,
757 .sched_wfq_weight_max = 1,
760 .stats_mask = STATS_MASK_DEFAULT,
763 [TM_NODE_LEVEL_SUBPORT] = {
764 .shaper_private_supported = 1,
765 .shaper_private_dual_rate_supported = 0,
766 .shaper_private_rate_min = 1,
767 .shaper_private_rate_max = UINT32_MAX,
768 .shaper_shared_n_max = 0,
771 .sched_n_children_max = UINT32_MAX,
772 .sched_sp_n_priorities_max = 1,
773 .sched_wfq_n_children_per_group_max = UINT32_MAX,
774 .sched_wfq_n_groups_max = 1,
775 .sched_wfq_weight_max = UINT32_MAX,
778 .stats_mask = STATS_MASK_DEFAULT,
781 [TM_NODE_LEVEL_PIPE] = {
782 .shaper_private_supported = 1,
783 .shaper_private_dual_rate_supported = 0,
784 .shaper_private_rate_min = 1,
785 .shaper_private_rate_max = UINT32_MAX,
786 .shaper_shared_n_max = 0,
789 .sched_n_children_max =
790 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
791 .sched_sp_n_priorities_max =
792 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
793 .sched_wfq_n_children_per_group_max = 1,
794 .sched_wfq_n_groups_max = 0,
795 .sched_wfq_weight_max = 1,
798 .stats_mask = STATS_MASK_DEFAULT,
801 [TM_NODE_LEVEL_TC] = {
802 .shaper_private_supported = 1,
803 .shaper_private_dual_rate_supported = 0,
804 .shaper_private_rate_min = 1,
805 .shaper_private_rate_max = UINT32_MAX,
806 .shaper_shared_n_max = 1,
809 .sched_n_children_max =
810 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
811 .sched_sp_n_priorities_max = 1,
812 .sched_wfq_n_children_per_group_max =
813 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
814 .sched_wfq_n_groups_max = 1,
815 .sched_wfq_weight_max = UINT32_MAX,
818 .stats_mask = STATS_MASK_DEFAULT,
821 [TM_NODE_LEVEL_QUEUE] = {
822 .shaper_private_supported = 0,
823 .shaper_private_dual_rate_supported = 0,
824 .shaper_private_rate_min = 0,
825 .shaper_private_rate_max = 0,
826 .shaper_shared_n_max = 0,
830 .cman_head_drop_supported = 0,
831 .cman_wred_context_private_supported = WRED_SUPPORTED,
832 .cman_wred_context_shared_n_max = 0,
835 .stats_mask = STATS_MASK_QUEUE,
839 /* Traffic manager node capabilities get */
841 pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
843 struct rte_tm_node_capabilities *cap,
844 struct rte_tm_error *error)
846 struct tm_node *tm_node;
849 return -rte_tm_error_set(error,
851 RTE_TM_ERROR_TYPE_CAPABILITIES,
853 rte_strerror(EINVAL));
855 tm_node = tm_node_search(dev, node_id);
857 return -rte_tm_error_set(error,
859 RTE_TM_ERROR_TYPE_NODE_ID,
861 rte_strerror(EINVAL));
863 memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));
865 switch (tm_node->level) {
866 case TM_NODE_LEVEL_PORT:
867 cap->nonleaf.sched_n_children_max =
868 tm_level_get_max_nodes(dev,
869 TM_NODE_LEVEL_SUBPORT);
870 cap->nonleaf.sched_wfq_n_children_per_group_max =
871 cap->nonleaf.sched_n_children_max;
874 case TM_NODE_LEVEL_SUBPORT:
875 cap->nonleaf.sched_n_children_max =
876 tm_level_get_max_nodes(dev,
878 cap->nonleaf.sched_wfq_n_children_per_group_max =
879 cap->nonleaf.sched_n_children_max;
882 case TM_NODE_LEVEL_PIPE:
883 case TM_NODE_LEVEL_TC:
884 case TM_NODE_LEVEL_QUEUE:
893 shaper_profile_check(struct rte_eth_dev *dev,
894 uint32_t shaper_profile_id,
895 struct rte_tm_shaper_params *profile,
896 struct rte_tm_error *error)
898 struct tm_shaper_profile *sp;
900 /* Shaper profile ID must not be NONE. */
901 if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
902 return -rte_tm_error_set(error,
904 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
906 rte_strerror(EINVAL));
908 /* Shaper profile must not already exist. */
909 sp = tm_shaper_profile_search(dev, shaper_profile_id);
911 return -rte_tm_error_set(error,
913 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
915 rte_strerror(EEXIST));
917 /* Profile must not be NULL. */
919 return -rte_tm_error_set(error,
921 RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
923 rte_strerror(EINVAL));
925 /* Peak rate: non-zero, 32-bit */
926 if (profile->peak.rate == 0 ||
927 profile->peak.rate >= UINT32_MAX)
928 return -rte_tm_error_set(error,
930 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
932 rte_strerror(EINVAL));
934 /* Peak size: non-zero, 32-bit */
935 if (profile->peak.size == 0 ||
936 profile->peak.size >= UINT32_MAX)
937 return -rte_tm_error_set(error,
939 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
941 rte_strerror(EINVAL));
943 /* Dual-rate profiles are not supported. */
944 if (profile->committed.rate != 0)
945 return -rte_tm_error_set(error,
947 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
949 rte_strerror(EINVAL));
951 /* Packet length adjust: 24 bytes */
952 if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
953 return -rte_tm_error_set(error,
955 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
957 rte_strerror(EINVAL));
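/*
 * Example of a shaper profile accepted by the checks above (rate and
 * bucket size are illustrative only, expressed in bytes/sec and bytes):
 *
 *   struct rte_tm_shaper_params profile = {
 *       .committed = { .rate = 0, .size = 0 },
 *       .peak = { .rate = 1250000000, .size = 1000000 },
 *       .pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
 *   };
 */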
962 /* Traffic manager shaper profile add */
964 pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
965 uint32_t shaper_profile_id,
966 struct rte_tm_shaper_params *profile,
967 struct rte_tm_error *error)
969 struct pmd_internals *p = dev->data->dev_private;
970 struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
971 struct tm_shaper_profile *sp;
974 /* Check input params */
975 status = shaper_profile_check(dev, shaper_profile_id, profile, error);
979 /* Memory allocation */
980 sp = calloc(1, sizeof(struct tm_shaper_profile));
982 return -rte_tm_error_set(error,
984 RTE_TM_ERROR_TYPE_UNSPECIFIED,
986 rte_strerror(ENOMEM));
989 sp->shaper_profile_id = shaper_profile_id;
990 memcpy(&sp->params, profile, sizeof(sp->params));
993 TAILQ_INSERT_TAIL(spl, sp, node);
994 p->soft.tm.h.n_shaper_profiles++;
999 /* Traffic manager shaper profile delete */
1001 pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
1002 uint32_t shaper_profile_id,
1003 struct rte_tm_error *error)
1005 struct pmd_internals *p = dev->data->dev_private;
1006 struct tm_shaper_profile *sp;
1008 /* Check existing */
1009 sp = tm_shaper_profile_search(dev, shaper_profile_id);
1011 return -rte_tm_error_set(error,
1013 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1015 rte_strerror(EINVAL));
1019 return -rte_tm_error_set(error,
1021 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1023 rte_strerror(EBUSY));
1025 /* Remove from list */
1026 TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
1027 p->soft.tm.h.n_shaper_profiles--;
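/*
 * Application-side usage sketch (profile ID 7 is hypothetical):
 *
 *   struct rte_tm_error err;
 *
 *   rte_tm_shaper_profile_add(port_id, 7, &profile, &err);
 *   ...
 *   rte_tm_shaper_profile_delete(port_id, 7, &err);
 *
 * Deletion is rejected with EBUSY for as long as the profile is still
 * referenced by one or more nodes.
 */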
1033 static struct tm_node *
1034 tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
1035 struct tm_shared_shaper *ss)
1037 struct pmd_internals *p = dev->data->dev_private;
1038 struct tm_node_list *nl = &p->soft.tm.h.nodes;
1041 /* Subport: each TC uses shared shaper */
1042 TAILQ_FOREACH(n, nl, node) {
1043 if (n->level != TM_NODE_LEVEL_TC ||
1044 n->params.n_shared_shapers == 0 ||
1045 n->params.shared_shaper_id[0] != ss->shared_shaper_id)
1055 update_subport_tc_rate(struct rte_eth_dev *dev,
1057 struct tm_shared_shaper *ss,
1058 struct tm_shaper_profile *sp_new)
1060 struct pmd_internals *p = dev->data->dev_private;
1061 uint32_t tc_id = tm_node_tc_id(dev, nt);
1063 struct tm_node *np = nt->parent_node;
1065 struct tm_node *ns = np->parent_node;
1066 uint32_t subport_id = tm_node_subport_id(dev, ns);
1068 struct rte_sched_subport_params subport_params;
1070 struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
1071 ss->shaper_profile_id);
1073 /* Derive new subport configuration. */
1074 memcpy(&subport_params,
1075 &p->soft.tm.params.subport_params[subport_id],
1076 sizeof(subport_params));
1077 subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;
1079 /* Update the subport configuration. */
1080 if (rte_sched_subport_config(p->soft.tm.sched,
1081 subport_id, &subport_params))
1084 /* Commit changes. */
1087 ss->shaper_profile_id = sp_new->shaper_profile_id;
1090 memcpy(&p->soft.tm.params.subport_params[subport_id],
1092 sizeof(subport_params));
1097 /* Traffic manager shared shaper add/update */
1099 pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
1100 uint32_t shared_shaper_id,
1101 uint32_t shaper_profile_id,
1102 struct rte_tm_error *error)
1104 struct pmd_internals *p = dev->data->dev_private;
1105 struct tm_shared_shaper *ss;
1106 struct tm_shaper_profile *sp;
1109 /* Shaper profile must be valid. */
1110 sp = tm_shaper_profile_search(dev, shaper_profile_id);
1112 return -rte_tm_error_set(error,
1114 RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1116 rte_strerror(EINVAL));
1119 /* Add new shared shaper */
1121 ss = tm_shared_shaper_search(dev, shared_shaper_id);
1123 struct tm_shared_shaper_list *ssl =
1124 &p->soft.tm.h.shared_shapers;
1126 /* Hierarchy must not be frozen */
1127 if (p->soft.tm.hierarchy_frozen)
1128 return -rte_tm_error_set(error,
1130 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1132 rte_strerror(EBUSY));
1134 /* Memory allocation */
1135 ss = calloc(1, sizeof(struct tm_shared_shaper));
1137 return -rte_tm_error_set(error,
1139 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1141 rte_strerror(ENOMEM));
1144 ss->shared_shaper_id = shared_shaper_id;
1145 ss->shaper_profile_id = shaper_profile_id;
1148 TAILQ_INSERT_TAIL(ssl, ss, node);
1149 p->soft.tm.h.n_shared_shapers++;
1155 /* Update existing shared shaper */
1157 /* Hierarchy must be frozen (run-time update) */
1158 if (p->soft.tm.hierarchy_frozen == 0)
1159 return -rte_tm_error_set(error,
1161 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1163 rte_strerror(EBUSY));
1166 /* Propagate change. */
1167 nt = tm_shared_shaper_get_tc(dev, ss);
1168 if (update_subport_tc_rate(dev, nt, ss, sp))
1169 return -rte_tm_error_set(error,
1171 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1173 rte_strerror(EINVAL));
1178 /* Traffic manager shared shaper delete */
1180 pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
1181 uint32_t shared_shaper_id,
1182 struct rte_tm_error *error)
1184 struct pmd_internals *p = dev->data->dev_private;
1185 struct tm_shared_shaper *ss;
1187 /* Check existing */
1188 ss = tm_shared_shaper_search(dev, shared_shaper_id);
1190 return -rte_tm_error_set(error,
1192 RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
1194 rte_strerror(EINVAL));
1198 return -rte_tm_error_set(error,
1200 RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
1202 rte_strerror(EBUSY));
1204 /* Remove from list */
1205 TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
1206 p->soft.tm.h.n_shared_shapers--;
1213 wred_profile_check(struct rte_eth_dev *dev,
1214 uint32_t wred_profile_id,
1215 struct rte_tm_wred_params *profile,
1216 struct rte_tm_error *error)
1218 struct tm_wred_profile *wp;
1219 enum rte_tm_color color;
1221 /* WRED profile ID must not be NONE. */
1222 if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
1223 return -rte_tm_error_set(error,
1225 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1227 rte_strerror(EINVAL));
1229 /* WRED profile must not already exist. */
1230 wp = tm_wred_profile_search(dev, wred_profile_id);
1232 return -rte_tm_error_set(error,
1234 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1236 rte_strerror(EEXIST));
1238 /* Profile must not be NULL. */
1239 if (profile == NULL)
1240 return -rte_tm_error_set(error,
1242 RTE_TM_ERROR_TYPE_WRED_PROFILE,
1244 rte_strerror(EINVAL));
1246 /* min_th <= max_th, max_th > 0 */
1247 for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
1248 uint16_t min_th = profile->red_params[color].min_th;
1249 uint16_t max_th = profile->red_params[color].max_th;
1251 if (min_th > max_th || max_th == 0)
1252 return -rte_tm_error_set(error,
1254 RTE_TM_ERROR_TYPE_WRED_PROFILE,
1256 rte_strerror(EINVAL));
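/*
 * Example WRED profile accepted by the check above; only the thresholds
 * exercised by the check are shown, and all values are illustrative:
 *
 *   struct rte_tm_wred_params wred = {
 *       .red_params = {
 *           [RTE_TM_GREEN]  = { .min_th = 48, .max_th = 64 },
 *           [RTE_TM_YELLOW] = { .min_th = 40, .max_th = 64 },
 *           [RTE_TM_RED]    = { .min_th = 32, .max_th = 64 },
 *       },
 *   };
 */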
1262 /* Traffic manager WRED profile add */
1264 pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
1265 uint32_t wred_profile_id,
1266 struct rte_tm_wred_params *profile,
1267 struct rte_tm_error *error)
1269 struct pmd_internals *p = dev->data->dev_private;
1270 struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
1271 struct tm_wred_profile *wp;
1274 /* Check input params */
1275 status = wred_profile_check(dev, wred_profile_id, profile, error);
1279 /* Memory allocation */
1280 wp = calloc(1, sizeof(struct tm_wred_profile));
1282 return -rte_tm_error_set(error,
1284 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1286 rte_strerror(ENOMEM));
1289 wp->wred_profile_id = wred_profile_id;
1290 memcpy(&wp->params, profile, sizeof(wp->params));
1293 TAILQ_INSERT_TAIL(wpl, wp, node);
1294 p->soft.tm.h.n_wred_profiles++;
1299 /* Traffic manager WRED profile delete */
1301 pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
1302 uint32_t wred_profile_id,
1303 struct rte_tm_error *error)
1305 struct pmd_internals *p = dev->data->dev_private;
1306 struct tm_wred_profile *wp;
1308 /* Check existing */
1309 wp = tm_wred_profile_search(dev, wred_profile_id);
1311 return -rte_tm_error_set(error,
1313 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1315 rte_strerror(EINVAL));
1319 return -rte_tm_error_set(error,
1321 RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1323 rte_strerror(EBUSY));
1325 /* Remove from list */
1326 TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
1327 p->soft.tm.h.n_wred_profiles--;
1334 node_add_check_port(struct rte_eth_dev *dev,
1336 uint32_t parent_node_id __rte_unused,
1339 uint32_t level_id __rte_unused,
1340 struct rte_tm_node_params *params,
1341 struct rte_tm_error *error)
1343 struct pmd_internals *p = dev->data->dev_private;
1344 struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
1345 params->shaper_profile_id);
1347 /* node type: non-leaf */
1348 if (node_id < p->params.soft.tm.nb_queues)
1349 return -rte_tm_error_set(error,
1351 RTE_TM_ERROR_TYPE_NODE_ID,
1353 rte_strerror(EINVAL));
1355 /* Priority must be 0 */
1357 return -rte_tm_error_set(error,
1359 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1361 rte_strerror(EINVAL));
1363 /* Weight must be 1 */
1365 return -rte_tm_error_set(error,
1367 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1369 rte_strerror(EINVAL));
1371 /* Shaper must be valid.
1372 * Shaper profile peak rate must fit the configured port rate.
 */
1374 if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1376 sp->params.peak.rate > p->params.soft.tm.rate)
1377 return -rte_tm_error_set(error,
1379 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1381 rte_strerror(EINVAL));
1383 /* No shared shapers */
1384 if (params->n_shared_shapers != 0)
1385 return -rte_tm_error_set(error,
1387 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1389 rte_strerror(EINVAL));
1391 /* Number of SP priorities must be 1 */
1392 if (params->nonleaf.n_sp_priorities != 1)
1393 return -rte_tm_error_set(error,
1395 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1397 rte_strerror(EINVAL));
1400 if (params->stats_mask & ~STATS_MASK_DEFAULT)
1401 return -rte_tm_error_set(error,
1403 RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1405 rte_strerror(EINVAL));
1411 node_add_check_subport(struct rte_eth_dev *dev,
1413 uint32_t parent_node_id __rte_unused,
1416 uint32_t level_id __rte_unused,
1417 struct rte_tm_node_params *params,
1418 struct rte_tm_error *error)
1420 struct pmd_internals *p = dev->data->dev_private;
1422 /* node type: non-leaf */
1423 if (node_id < p->params.soft.tm.nb_queues)
1424 return -rte_tm_error_set(error,
1426 RTE_TM_ERROR_TYPE_NODE_ID,
1428 rte_strerror(EINVAL));
1430 /* Priority must be 0 */
1432 return -rte_tm_error_set(error,
1434 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1436 rte_strerror(EINVAL));
1438 /* Weight must be 1 */
1440 return -rte_tm_error_set(error,
1442 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1444 rte_strerror(EINVAL));
1446 /* Shaper must be valid */
1447 if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1448 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1449 return -rte_tm_error_set(error,
1451 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1453 rte_strerror(EINVAL));
1455 /* No shared shapers */
1456 if (params->n_shared_shapers != 0)
1457 return -rte_tm_error_set(error,
1459 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1461 rte_strerror(EINVAL));
1463 /* Number of SP priorities must be 1 */
1464 if (params->nonleaf.n_sp_priorities != 1)
1465 return -rte_tm_error_set(error,
1467 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1469 rte_strerror(EINVAL));
1472 if (params->stats_mask & ~STATS_MASK_DEFAULT)
1473 return -rte_tm_error_set(error,
1475 RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1477 rte_strerror(EINVAL));
1483 node_add_check_pipe(struct rte_eth_dev *dev,
1485 uint32_t parent_node_id __rte_unused,
1487 uint32_t weight __rte_unused,
1488 uint32_t level_id __rte_unused,
1489 struct rte_tm_node_params *params,
1490 struct rte_tm_error *error)
1492 struct pmd_internals *p = dev->data->dev_private;
1494 /* node type: non-leaf */
1495 if (node_id < p->params.soft.tm.nb_queues)
1496 return -rte_tm_error_set(error,
1498 RTE_TM_ERROR_TYPE_NODE_ID,
1500 rte_strerror(EINVAL));
1502 /* Priority must be 0 */
1504 return -rte_tm_error_set(error,
1506 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1508 rte_strerror(EINVAL));
1510 /* Shaper must be valid */
1511 if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1512 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1513 return -rte_tm_error_set(error,
1515 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1517 rte_strerror(EINVAL));
1519 /* No shared shapers */
1520 if (params->n_shared_shapers != 0)
1521 return -rte_tm_error_set(error,
1523 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1525 rte_strerror(EINVAL));
1527 /* Number of SP priorities must be 4 */
1528 if (params->nonleaf.n_sp_priorities !=
1529 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1530 return -rte_tm_error_set(error,
1532 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1534 rte_strerror(EINVAL));
1536 /* WFQ mode must be byte mode */
1537 if (params->nonleaf.wfq_weight_mode != NULL &&
1538 params->nonleaf.wfq_weight_mode[0] != 0 &&
1539 params->nonleaf.wfq_weight_mode[1] != 0 &&
1540 params->nonleaf.wfq_weight_mode[2] != 0 &&
1541 params->nonleaf.wfq_weight_mode[3] != 0)
1542 return -rte_tm_error_set(error,
1544 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
1546 rte_strerror(EINVAL));
1549 if (params->stats_mask & ~STATS_MASK_DEFAULT)
1550 return -rte_tm_error_set(error,
1552 RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1554 rte_strerror(EINVAL));
1560 node_add_check_tc(struct rte_eth_dev *dev,
1562 uint32_t parent_node_id __rte_unused,
1563 uint32_t priority __rte_unused,
1565 uint32_t level_id __rte_unused,
1566 struct rte_tm_node_params *params,
1567 struct rte_tm_error *error)
1569 struct pmd_internals *p = dev->data->dev_private;
1571 /* node type: non-leaf */
1572 if (node_id < p->params.soft.tm.nb_queues)
1573 return -rte_tm_error_set(error,
1575 RTE_TM_ERROR_TYPE_NODE_ID,
1577 rte_strerror(EINVAL));
1579 /* Weight must be 1 */
1581 return -rte_tm_error_set(error,
1583 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1585 rte_strerror(EINVAL));
1587 /* Shaper must be valid */
1588 if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1589 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1590 return -rte_tm_error_set(error,
1592 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1594 rte_strerror(EINVAL));
1596 /* At most one shared shaper, and if present it must reference a valid shared shaper */
1597 if (params->n_shared_shapers > 1)
1598 return -rte_tm_error_set(error,
1600 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1602 rte_strerror(EINVAL));
1604 if (params->n_shared_shapers == 1 &&
1605 (params->shared_shaper_id == NULL ||
1606 (!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
1607 return -rte_tm_error_set(error,
1609 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
1611 rte_strerror(EINVAL));
1613 /* Number of SP priorities must be 1 */
1614 if (params->nonleaf.n_sp_priorities != 1)
1615 return -rte_tm_error_set(error,
1617 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1619 rte_strerror(EINVAL));
1622 if (params->stats_mask & ~STATS_MASK_DEFAULT)
1623 return -rte_tm_error_set(error,
1625 RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1627 rte_strerror(EINVAL));
1633 node_add_check_queue(struct rte_eth_dev *dev,
1635 uint32_t parent_node_id __rte_unused,
1637 uint32_t weight __rte_unused,
1638 uint32_t level_id __rte_unused,
1639 struct rte_tm_node_params *params,
1640 struct rte_tm_error *error)
1642 struct pmd_internals *p = dev->data->dev_private;
1644 /* node type: leaf */
1645 if (node_id >= p->params.soft.tm.nb_queues)
1646 return -rte_tm_error_set(error,
1648 RTE_TM_ERROR_TYPE_NODE_ID,
1650 rte_strerror(EINVAL));
1652 /* Priority must be 0 */
1654 return -rte_tm_error_set(error,
1656 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1658 rte_strerror(EINVAL));
1661 if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1662 return -rte_tm_error_set(error,
1664 RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1666 rte_strerror(EINVAL));
1668 /* No shared shapers */
1669 if (params->n_shared_shapers != 0)
1670 return -rte_tm_error_set(error,
1672 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1674 rte_strerror(EINVAL));
1676 /* Congestion management must not be head drop */
1677 if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
1678 return -rte_tm_error_set(error,
1680 RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
1682 rte_strerror(EINVAL));
1684 /* Congestion management set to WRED */
1685 if (params->leaf.cman == RTE_TM_CMAN_WRED) {
1686 uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
1687 struct tm_wred_profile *wp = tm_wred_profile_search(dev,
1690 /* WRED profile (for private WRED context) must be valid */
1691 if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
1693 return -rte_tm_error_set(error,
1695 RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
1697 rte_strerror(EINVAL));
1699 /* No shared WRED contexts */
1700 if (params->leaf.wred.n_shared_wred_contexts != 0)
1701 return -rte_tm_error_set(error,
1703 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
1705 rte_strerror(EINVAL));
1709 if (params->stats_mask & ~STATS_MASK_QUEUE)
1710 return -rte_tm_error_set(error,
1712 RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1714 rte_strerror(EINVAL));
1720 node_add_check(struct rte_eth_dev *dev,
1722 uint32_t parent_node_id,
1726 struct rte_tm_node_params *params,
1727 struct rte_tm_error *error)
1733 /* node_id, parent_node_id:
1734 * -node_id must not be RTE_TM_NODE_ID_NULL
1735 * -node_id must not be in use
1736 * -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1737 * -root node must not exist
1738 * -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1739 * -parent_node_id must be valid
 */
1741 if (node_id == RTE_TM_NODE_ID_NULL)
1742 return -rte_tm_error_set(error,
1744 RTE_TM_ERROR_TYPE_NODE_ID,
1746 rte_strerror(EINVAL));
1748 if (tm_node_search(dev, node_id))
1749 return -rte_tm_error_set(error,
1751 RTE_TM_ERROR_TYPE_NODE_ID,
1753 rte_strerror(EEXIST));
1755 if (parent_node_id == RTE_TM_NODE_ID_NULL) {
1757 if (tm_root_node_present(dev))
1758 return -rte_tm_error_set(error,
1760 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1762 rte_strerror(EEXIST));
1764 pn = tm_node_search(dev, parent_node_id);
1766 return -rte_tm_error_set(error,
1768 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1770 rte_strerror(EINVAL));
1773 /* priority: must be 0 .. 3 */
1774 if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1775 return -rte_tm_error_set(error,
1777 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1779 rte_strerror(EINVAL));
1781 /* weight: must be non-zero and strictly below UINT8_MAX */
1782 if (weight == 0 || weight >= UINT8_MAX)
1783 return -rte_tm_error_set(error,
1785 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1787 rte_strerror(EINVAL));
1789 /* level_id: if valid, then
1790 * -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1791 * -level_id must be zero
1792 * -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1793 * -level_id must be parent level ID plus one
 */
1795 level = (pn == NULL) ? 0 : pn->level + 1;
1796 if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
1797 return -rte_tm_error_set(error,
1799 RTE_TM_ERROR_TYPE_LEVEL_ID,
1801 rte_strerror(EINVAL));
1803 /* params: must not be NULL */
1805 return -rte_tm_error_set(error,
1807 RTE_TM_ERROR_TYPE_NODE_PARAMS,
1809 rte_strerror(EINVAL));
1811 /* params: per level checks */
1813 case TM_NODE_LEVEL_PORT:
1814 status = node_add_check_port(dev, node_id,
1815 parent_node_id, priority, weight, level_id,
1821 case TM_NODE_LEVEL_SUBPORT:
1822 status = node_add_check_subport(dev, node_id,
1823 parent_node_id, priority, weight, level_id,
1829 case TM_NODE_LEVEL_PIPE:
1830 status = node_add_check_pipe(dev, node_id,
1831 parent_node_id, priority, weight, level_id,
1837 case TM_NODE_LEVEL_TC:
1838 status = node_add_check_tc(dev, node_id,
1839 parent_node_id, priority, weight, level_id,
1845 case TM_NODE_LEVEL_QUEUE:
1846 status = node_add_check_queue(dev, node_id,
1847 parent_node_id, priority, weight, level_id,
1854 return -rte_tm_error_set(error,
1856 RTE_TM_ERROR_TYPE_LEVEL_ID,
1858 rte_strerror(EINVAL));
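/*
 * Hierarchy construction sketch (application side). Node IDs are
 * hypothetical and chosen so that non-leaf IDs stay at or above
 * nb_queues while leaf (queue) IDs stay below it:
 *
 *   rte_tm_node_add(port_id, 1000000, RTE_TM_NODE_ID_NULL, 0, 1,
 *       RTE_TM_NODE_LEVEL_ID_ANY, &port_params, &err);     root
 *   rte_tm_node_add(port_id, 900000, 1000000, 0, 1,
 *       RTE_TM_NODE_LEVEL_ID_ANY, &subport_params, &err);  subport
 *   rte_tm_node_add(port_id, 800000, 900000, 0, 1,
 *       RTE_TM_NODE_LEVEL_ID_ANY, &pipe_params, &err);     pipe
 *   rte_tm_node_add(port_id, 700000, 800000, 0, 1,
 *       RTE_TM_NODE_LEVEL_ID_ANY, &tc_params, &err);       TC, priority 0
 *   rte_tm_node_add(port_id, 0, 700000, 0, 1,
 *       RTE_TM_NODE_LEVEL_ID_ANY, &queue_params, &err);    leaf queue 0
 *
 * TC nodes use priority 0..3 (one per traffic class); all other levels
 * use priority 0.
 */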
1864 /* Traffic manager node add */
1866 pmd_tm_node_add(struct rte_eth_dev *dev,
1868 uint32_t parent_node_id,
1872 struct rte_tm_node_params *params,
1873 struct rte_tm_error *error)
1875 struct pmd_internals *p = dev->data->dev_private;
1876 struct tm_node_list *nl = &p->soft.tm.h.nodes;
1882 if (p->soft.tm.hierarchy_frozen)
1883 return -rte_tm_error_set(error,
1885 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1887 rte_strerror(EBUSY));
1889 status = node_add_check(dev, node_id, parent_node_id, priority, weight,
1890 level_id, params, error);
1894 /* Memory allocation */
1895 n = calloc(1, sizeof(struct tm_node));
1897 return -rte_tm_error_set(error,
1899 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1901 rte_strerror(ENOMEM));
1904 n->node_id = node_id;
1905 n->parent_node_id = parent_node_id;
1906 n->priority = priority;
1909 if (parent_node_id != RTE_TM_NODE_ID_NULL) {
1910 n->parent_node = tm_node_search(dev, parent_node_id);
1911 n->level = n->parent_node->level + 1;
1914 if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1915 n->shaper_profile = tm_shaper_profile_search(dev,
1916 params->shaper_profile_id);
1918 if (n->level == TM_NODE_LEVEL_QUEUE &&
1919 params->leaf.cman == RTE_TM_CMAN_WRED)
1920 n->wred_profile = tm_wred_profile_search(dev,
1921 params->leaf.wred.wred_profile_id);
1923 memcpy(&n->params, params, sizeof(n->params));
1926 TAILQ_INSERT_TAIL(nl, n, node);
1927 p->soft.tm.h.n_nodes++;
1929 /* Update dependencies */
1931 n->parent_node->n_children++;
1933 if (n->shaper_profile)
1934 n->shaper_profile->n_users++;
1936 for (i = 0; i < params->n_shared_shapers; i++) {
1937 struct tm_shared_shaper *ss;
1939 ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
1943 if (n->wred_profile)
1944 n->wred_profile->n_users++;
1946 p->soft.tm.h.n_tm_nodes[n->level]++;
1951 /* Traffic manager node delete */
1953 pmd_tm_node_delete(struct rte_eth_dev *dev,
1955 struct rte_tm_error *error)
1957 struct pmd_internals *p = dev->data->dev_private;
1961 /* Check hierarchy changes are currently allowed */
1962 if (p->soft.tm.hierarchy_frozen)
1963 return -rte_tm_error_set(error,
1965 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1967 rte_strerror(EBUSY));
1969 /* Check existing */
1970 n = tm_node_search(dev, node_id);
1972 return -rte_tm_error_set(error,
1974 RTE_TM_ERROR_TYPE_NODE_ID,
1976 rte_strerror(EINVAL));
1980 return -rte_tm_error_set(error,
1982 RTE_TM_ERROR_TYPE_NODE_ID,
1984 rte_strerror(EBUSY));
1986 /* Update dependencies */
1987 p->soft.tm.h.n_tm_nodes[n->level]--;
1989 if (n->wred_profile)
1990 n->wred_profile->n_users--;
1992 for (i = 0; i < n->params.n_shared_shapers; i++) {
1993 struct tm_shared_shaper *ss;
1995 ss = tm_shared_shaper_search(dev,
1996 n->params.shared_shaper_id[i]);
2000 if (n->shaper_profile)
2001 n->shaper_profile->n_users--;
2004 n->parent_node->n_children--;
2006 /* Remove from list */
2007 TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
2008 p->soft.tm.h.n_nodes--;
2016 pipe_profile_build(struct rte_eth_dev *dev,
2018 struct rte_sched_pipe_params *pp)
2020 struct pmd_internals *p = dev->data->dev_private;
2021 struct tm_hierarchy *h = &p->soft.tm.h;
2022 struct tm_node_list *nl = &h->nodes;
2023 struct tm_node *nt, *nq;
2025 memset(pp, 0, sizeof(*pp));
2028 pp->tb_rate = np->shaper_profile->params.peak.rate;
2029 pp->tb_size = np->shaper_profile->params.peak.size;
2031 /* Traffic Class (TC) */
2032 pp->tc_period = PIPE_TC_PERIOD;
2034 #ifdef RTE_SCHED_SUBPORT_TC_OV
2035 pp->tc_ov_weight = np->weight;
2038 TAILQ_FOREACH(nt, nl, node) {
2039 uint32_t queue_id = 0;
2041 if (nt->level != TM_NODE_LEVEL_TC ||
2042 nt->parent_node_id != np->node_id)
2045 pp->tc_rate[nt->priority] =
2046 nt->shaper_profile->params.peak.rate;
2049 TAILQ_FOREACH(nq, nl, node) {
2050 uint32_t pipe_queue_id;
2052 if (nq->level != TM_NODE_LEVEL_QUEUE ||
2053 nq->parent_node_id != nt->node_id)
2056 pipe_queue_id = nt->priority *
2057 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
2058 pp->wrr_weights[pipe_queue_id] = nq->weight;
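/*
 * Worked example for the queue index computed above, assuming 4 queues
 * per traffic class: TC priority 2, queue 3 within that TC maps to
 * pipe_queue_id = 2 * 4 + 3 = 11, i.e. wrr_weights[11].
 */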
2066 pipe_profile_free_exists(struct rte_eth_dev *dev,
2067 uint32_t *pipe_profile_id)
2069 struct pmd_internals *p = dev->data->dev_private;
2070 struct tm_params *t = &p->soft.tm.params;
2072 if (t->n_pipe_profiles < RTE_SCHED_PIPE_PROFILES_PER_PORT) {
2073 *pipe_profile_id = t->n_pipe_profiles;
2081 pipe_profile_exists(struct rte_eth_dev *dev,
2082 struct rte_sched_pipe_params *pp,
2083 uint32_t *pipe_profile_id)
2085 struct pmd_internals *p = dev->data->dev_private;
2086 struct tm_params *t = &p->soft.tm.params;
2089 for (i = 0; i < t->n_pipe_profiles; i++)
2090 if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
2091 if (pipe_profile_id)
2092 *pipe_profile_id = i;
2100 pipe_profile_install(struct rte_eth_dev *dev,
2101 struct rte_sched_pipe_params *pp,
2102 uint32_t pipe_profile_id)
2104 struct pmd_internals *p = dev->data->dev_private;
2105 struct tm_params *t = &p->soft.tm.params;
2107 memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
2108 t->n_pipe_profiles++;
2112 pipe_profile_mark(struct rte_eth_dev *dev,
2113 uint32_t subport_id,
2115 uint32_t pipe_profile_id)
2117 struct pmd_internals *p = dev->data->dev_private;
2118 struct tm_hierarchy *h = &p->soft.tm.h;
2119 struct tm_params *t = &p->soft.tm.params;
2120 uint32_t n_pipes_per_subport, pos;
2122 n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2123 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2124 pos = subport_id * n_pipes_per_subport + pipe_id;
2126 t->pipe_to_profile[pos] = pipe_profile_id;
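/*
 * Worked example for the pipe_to_profile index above: with a hierarchy
 * of 256 pipes per subport (hypothetical), subport 1 / pipe 10 lands at
 * pos = 1 * 256 + 10 = 266.
 */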
2129 static struct rte_sched_pipe_params *
2130 pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
2132 struct pmd_internals *p = dev->data->dev_private;
2133 struct tm_hierarchy *h = &p->soft.tm.h;
2134 struct tm_params *t = &p->soft.tm.params;
2135 uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2136 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2138 uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
2139 uint32_t pipe_id = tm_node_pipe_id(dev, np);
2141 uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
2142 uint32_t pipe_profile_id = t->pipe_to_profile[pos];
2144 return &t->pipe_profiles[pipe_profile_id];
2148 pipe_profiles_generate(struct rte_eth_dev *dev)
2150 struct pmd_internals *p = dev->data->dev_private;
2151 struct tm_hierarchy *h = &p->soft.tm.h;
2152 struct tm_node_list *nl = &h->nodes;
2153 struct tm_node *ns, *np;
2154 uint32_t subport_id;
2156 /* Objective: Fill in the following fields in struct tm_params:
 * pipe_profiles, n_pipe_profiles, pipe_to_profile.
 */
2163 TAILQ_FOREACH(ns, nl, node) {
2166 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2170 TAILQ_FOREACH(np, nl, node) {
2171 struct rte_sched_pipe_params pp;
2174 if (np->level != TM_NODE_LEVEL_PIPE ||
2175 np->parent_node_id != ns->node_id)
2178 pipe_profile_build(dev, np, &pp);
2180 if (!pipe_profile_exists(dev, &pp, &pos)) {
2181 if (!pipe_profile_free_exists(dev, &pos))
2184 pipe_profile_install(dev, &pp, pos);
2187 pipe_profile_mark(dev, subport_id, pipe_id, pos);
2198 static struct tm_wred_profile *
2199 tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
2201 struct pmd_internals *p = dev->data->dev_private;
2202 struct tm_hierarchy *h = &p->soft.tm.h;
2203 struct tm_node_list *nl = &h->nodes;
2206 TAILQ_FOREACH(nq, nl, node) {
2207 if (nq->level != TM_NODE_LEVEL_QUEUE ||
2208 nq->parent_node->priority != tc_id)
2211 return nq->wred_profile;
2217 #ifdef RTE_SCHED_RED
2220 wred_profiles_set(struct rte_eth_dev *dev)
2222 struct pmd_internals *p = dev->data->dev_private;
2223 struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
2225 enum rte_tm_color color;
2227 for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
2228 for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
2229 struct rte_red_params *dst =
2230 &pp->red_params[tc_id][color];
2231 struct tm_wred_profile *src_wp =
2232 tm_tc_wred_profile_get(dev, tc_id);
2233 struct rte_tm_red_params *src =
2234 &src_wp->params.red_params[color];
2236 memcpy(dst, src, sizeof(*dst));
2242 #define wred_profiles_set(dev)
2246 static struct tm_shared_shaper *
2247 tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
2249 return (tc_node->params.n_shared_shapers) ?
2250 tm_shared_shaper_search(dev,
2251 tc_node->params.shared_shaper_id[0]) :
2255 static struct tm_shared_shaper *
2256 tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
2257 struct tm_node *subport_node,
2260 struct pmd_internals *p = dev->data->dev_private;
2261 struct tm_node_list *nl = &p->soft.tm.h.nodes;
2264 TAILQ_FOREACH(n, nl, node) {
2265 if (n->level != TM_NODE_LEVEL_TC ||
2266 n->parent_node->parent_node_id !=
2267 subport_node->node_id ||
2268 n->priority != tc_id)
2271 return tm_tc_shared_shaper_get(dev, n);
2278 hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
2280 struct pmd_internals *p = dev->data->dev_private;
2281 struct tm_hierarchy *h = &p->soft.tm.h;
2282 struct tm_node_list *nl = &h->nodes;
2283 struct tm_shared_shaper_list *ssl = &h->shared_shapers;
2284 struct tm_wred_profile_list *wpl = &h->wred_profiles;
2285 struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
2286 struct tm_shared_shaper *ss;
2288 uint32_t n_pipes_per_subport;
2290 /* Root node exists. */
2292 return -rte_tm_error_set(error,
2294 RTE_TM_ERROR_TYPE_LEVEL_ID,
2296 rte_strerror(EINVAL));
2298 /* There is at least one subport, max is not exceeded. */
2299 if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
2300 return -rte_tm_error_set(error,
2302 RTE_TM_ERROR_TYPE_LEVEL_ID,
2304 rte_strerror(EINVAL));
2306 /* There is at least one pipe. */
2307 if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
2308 return -rte_tm_error_set(error,
2310 RTE_TM_ERROR_TYPE_LEVEL_ID,
2312 rte_strerror(EINVAL));
2314 /* Number of pipes is the same for all subports. Maximum number of pipes
2315 * per subport is not exceeded.
 */
2317 n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2318 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2320 if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
2321 return -rte_tm_error_set(error,
2323 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2325 rte_strerror(EINVAL));
2327 TAILQ_FOREACH(ns, nl, node) {
2328 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2331 if (ns->n_children != n_pipes_per_subport)
2332 return -rte_tm_error_set(error,
2334 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2336 rte_strerror(EINVAL));
2339 /* Each pipe has exactly 4 TCs, with exactly one TC for each priority */
2340 TAILQ_FOREACH(np, nl, node) {
2341 uint32_t mask = 0, mask_expected =
2342 RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
2345 if (np->level != TM_NODE_LEVEL_PIPE)
2348 if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
2349 return -rte_tm_error_set(error,
2351 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2353 rte_strerror(EINVAL));
2355 TAILQ_FOREACH(nt, nl, node) {
2356 if (nt->level != TM_NODE_LEVEL_TC ||
2357 nt->parent_node_id != np->node_id)
2360 mask |= 1 << nt->priority;
2363 if (mask != mask_expected)
2364 return -rte_tm_error_set(error,
2366 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2368 rte_strerror(EINVAL));
2371 /* Each TC has exactly 4 packet queues. */
2372 TAILQ_FOREACH(nt, nl, node) {
2373 if (nt->level != TM_NODE_LEVEL_TC)
2376 if (nt->n_children != RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
2377 return -rte_tm_error_set(error,
2379 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2381 rte_strerror(EINVAL));
/* Shared shapers:
2386 * -For each TC #i, all pipes in the same subport use the same
2387 * shared shaper (or no shared shaper) for their TC#i.
2388 * -Each shared shaper needs to have at least one user. All its
2389 * users have to be TC nodes with the same priority and the same
 * subport.
 */
2392 TAILQ_FOREACH(ns, nl, node) {
2393 struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2396 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2399 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
2400 s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);
2402 TAILQ_FOREACH(nt, nl, node) {
2403 struct tm_shared_shaper *subport_ss, *tc_ss;
2405 if (nt->level != TM_NODE_LEVEL_TC ||
2406 nt->parent_node->parent_node_id !=
2410 subport_ss = s[nt->priority];
2411 tc_ss = tm_tc_shared_shaper_get(dev, nt);
2413 if (subport_ss == NULL && tc_ss == NULL)
2416 if ((subport_ss == NULL && tc_ss != NULL) ||
2417 (subport_ss != NULL && tc_ss == NULL) ||
2418 subport_ss->shared_shaper_id !=
2419 tc_ss->shared_shaper_id)
2420 return -rte_tm_error_set(error,
2422 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2424 rte_strerror(EINVAL));
2428 TAILQ_FOREACH(ss, ssl, node) {
2429 struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
2430 uint32_t n_users = 0;
2433 TAILQ_FOREACH(nt, nl, node) {
2434 if (nt->level != TM_NODE_LEVEL_TC ||
2435 nt->priority != nt_any->priority ||
2436 nt->parent_node->parent_node_id !=
2437 nt_any->parent_node->parent_node_id)
2443 if (ss->n_users == 0 || ss->n_users != n_users)
2444 return -rte_tm_error_set(error,
2446 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2448 rte_strerror(EINVAL));
2451 /* Generate the pipe profiles and make sure their number does not exceed the profile table limit. */
2452 if (pipe_profiles_generate(dev))
2453 return -rte_tm_error_set(error,
2455 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2457 rte_strerror(EINVAL));
/*
2460 * WRED (when used, i.e. at least one WRED profile defined):
2461 * -Each WRED profile must have at least one user.
2462 * -All leaf nodes must have their private WRED context enabled.
2463 * -For each TC #i, all leaf nodes must use the same WRED profile
2464 * for their private WRED context.
 */
2466 if (h->n_wred_profiles) {
2467 struct tm_wred_profile *wp;
2468 struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2471 TAILQ_FOREACH(wp, wpl, node)
2472 if (wp->n_users == 0)
2473 return -rte_tm_error_set(error,
2475 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2477 rte_strerror(EINVAL));
2479 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
2480 w[id] = tm_tc_wred_profile_get(dev, id);
2483 return -rte_tm_error_set(error,
2485 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2487 rte_strerror(EINVAL));
2490 TAILQ_FOREACH(nq, nl, node) {
2493 if (nq->level != TM_NODE_LEVEL_QUEUE)
2496 id = nq->parent_node->priority;
2498 if (nq->wred_profile == NULL ||
2499 nq->wred_profile->wred_profile_id !=
2500 w[id]->wred_profile_id)
2501 return -rte_tm_error_set(error,
2503 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2505 rte_strerror(EINVAL));
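/*
 * To summarize, hierarchy_commit_check() accepts a hierarchy only if:
 *  - a root (port) node exists with 1 .. TM_MAX_SUBPORTS subports;
 *  - every subport has the same number of pipes, within the limit;
 *  - every pipe has exactly 4 TCs, one per priority 0 .. 3;
 *  - every TC has exactly 4 queues;
 *  - shared shapers are used consistently per (subport, TC priority);
 *  - the generated pipe profiles fit the profile table;
 *  - WRED, when used, is enabled on all leaf nodes with one profile
 *    per TC priority.
 */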
2513 hierarchy_blueprints_create(struct rte_eth_dev *dev)
2515 struct pmd_internals *p = dev->data->dev_private;
2516 struct tm_params *t = &p->soft.tm.params;
2517 struct tm_hierarchy *h = &p->soft.tm.h;
2519 struct tm_node_list *nl = &h->nodes;
2520 struct tm_node *root = tm_root_node_present(dev), *n;
2522 uint32_t subport_id;
2524 t->port_params = (struct rte_sched_port_params) {
2525 .name = dev->data->name,
2526 .socket = dev->data->numa_node,
2527 .rate = root->shaper_profile->params.peak.rate,
2528 .mtu = dev->data->mtu,
2530 root->shaper_profile->params.pkt_length_adjust,
2531 .n_subports_per_port = root->n_children,
2532 .n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2533 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
2534 .qsize = {p->params.soft.tm.qsize[0],
2535 p->params.soft.tm.qsize[1],
2536 p->params.soft.tm.qsize[2],
2537 p->params.soft.tm.qsize[3],
2539 .pipe_profiles = t->pipe_profiles,
2540 .n_pipe_profiles = t->n_pipe_profiles,
2543 wred_profiles_set(dev);
2546 TAILQ_FOREACH(n, nl, node) {
2547 uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2550 if (n->level != TM_NODE_LEVEL_SUBPORT)
2553 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
2554 struct tm_shared_shaper *ss;
2555 struct tm_shaper_profile *sp;
2557 ss = tm_subport_tc_shared_shaper_get(dev, n, i);
2558 sp = (ss) ? tm_shaper_profile_search(dev,
2559 ss->shaper_profile_id) :
2561 tc_rate[i] = sp->params.peak.rate;
2564 t->subport_params[subport_id] =
2565 (struct rte_sched_subport_params) {
2566 .tb_rate = n->shaper_profile->params.peak.rate,
2567 .tb_size = n->shaper_profile->params.peak.size,
2569 .tc_rate = {tc_rate[0],
2574 .tc_period = SUBPORT_TC_PERIOD,
2581 /* Traffic manager hierarchy commit */
2583 pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
2585 struct rte_tm_error *error)
2587 struct pmd_internals *p = dev->data->dev_private;
2591 if (p->soft.tm.hierarchy_frozen)
2592 return -rte_tm_error_set(error,
2594 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2596 rte_strerror(EBUSY));
2598 status = hierarchy_commit_check(dev, error);
2600 if (clear_on_fail) {
2601 tm_hierarchy_uninit(p);
2602 tm_hierarchy_init(p);
2608 /* Create blueprints */
2609 hierarchy_blueprints_create(dev);
2611 /* Freeze hierarchy */
2612 p->soft.tm.hierarchy_frozen = 1;
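/* Illustration only (not part of the driver): minimal application-side
 * sequence for the commit implemented above, using the generic rte_tm API.
 * Profiles and nodes are assumed to have been added already (not shown);
 * clear_on_fail = 1 asks the driver to wipe the pending hierarchy if any
 * commit check fails, which maps to the tm_hierarchy_uninit()/init() path
 * above.
 */
#if 0	/* sketch, intentionally excluded from the build */
static int
example_hierarchy_commit(uint16_t port_id)
{
	struct rte_tm_error error;

	if (rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &error))
		return -1; /* error.type / error.message hold the details */

	return 0;
}
#endif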
2617 #ifdef RTE_SCHED_SUBPORT_TC_OV
2620 update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
2622 struct pmd_internals *p = dev->data->dev_private;
2623 uint32_t pipe_id = tm_node_pipe_id(dev, np);
2625 struct tm_node *ns = np->parent_node;
2626 uint32_t subport_id = tm_node_subport_id(dev, ns);
2628 struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2629 struct rte_sched_pipe_params profile1;
2630 uint32_t pipe_profile_id;
2632 /* Derive new pipe profile. */
2633 memcpy(&profile1, profile0, sizeof(profile1));
2634 profile1.tc_ov_weight = (uint8_t)weight;
2636 /* Since implementation does not allow adding more pipe profiles after
2637 * port configuration, the pipe configuration can be successfully
2638 * updated only if the new profile is also part of the existing set of
2639 * pipe profiles.
2640 */
2641 if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2644 /* Update the pipe profile used by the current pipe. */
2645 if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2646 (int32_t)pipe_profile_id))
2649 /* Commit changes. */
2650 pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2651 np->weight = weight;
2659 update_queue_weight(struct rte_eth_dev *dev,
2660 struct tm_node *nq, uint32_t weight)
2662 struct pmd_internals *p = dev->data->dev_private;
2663 uint32_t queue_id = tm_node_queue_id(dev, nq);
2665 struct tm_node *nt = nq->parent_node;
2666 uint32_t tc_id = tm_node_tc_id(dev, nt);
2668 struct tm_node *np = nt->parent_node;
2669 uint32_t pipe_id = tm_node_pipe_id(dev, np);
2671 struct tm_node *ns = np->parent_node;
2672 uint32_t subport_id = tm_node_subport_id(dev, ns);
2674 uint32_t pipe_queue_id =
2675 tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
2677 struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2678 struct rte_sched_pipe_params profile1;
2679 uint32_t pipe_profile_id;
2681 /* Derive new pipe profile. */
2682 memcpy(&profile1, profile0, sizeof(profile1));
2683 profile1.wrr_weights[pipe_queue_id] = (uint8_t)weight;
2685 /* Since implementation does not allow adding more pipe profiles after
2686 * port configuration, the pipe configuration can be successfully
2687 * updated only if the new profile is also part of the existing set
2688 * of pipe profiles.
2689 */
2690 if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2693 /* Update the pipe profile used by the current pipe. */
2694 if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2695 (int32_t)pipe_profile_id))
2698 /* Commit changes. */
2699 pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2700 nq->weight = weight;
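/* Note: both weight update helpers above (and the pipe/TC rate helpers
 * below) rely on the same restriction: librte_sched accepts no new pipe
 * profiles after rte_sched_port_config(), so a runtime update can only
 * switch the pipe to a profile that was already generated at commit time.
 * In other words, a weight or rate change succeeds only if the requested
 * value already appears in some committed pipe profile.
 */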
2705 /* Traffic manager node parent update */
2707 pmd_tm_node_parent_update(struct rte_eth_dev *dev,
2709 uint32_t parent_node_id,
2712 struct rte_tm_error *error)
2716 /* Port must be started and TM used. */
2717 if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
2718 return -rte_tm_error_set(error,
2720 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2722 rte_strerror(EBUSY));
2724 /* Node must be valid */
2725 n = tm_node_search(dev, node_id);
2727 return -rte_tm_error_set(error,
2729 RTE_TM_ERROR_TYPE_NODE_ID,
2731 rte_strerror(EINVAL));
2733 /* Parent node must be the same */
2734 if (n->parent_node_id != parent_node_id)
2735 return -rte_tm_error_set(error,
2737 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
2739 rte_strerror(EINVAL));
2741 /* Priority must be the same */
2742 if (n->priority != priority)
2743 return -rte_tm_error_set(error,
2745 RTE_TM_ERROR_TYPE_NODE_PRIORITY,
2747 rte_strerror(EINVAL));
2749 /* weight: must be non-zero and strictly below 255 (UINT8_MAX) */
2750 if (weight == 0 || weight >= UINT8_MAX)
2751 return -rte_tm_error_set(error,
2753 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2755 rte_strerror(EINVAL));
2758 case TM_NODE_LEVEL_PORT:
2759 return -rte_tm_error_set(error,
2761 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2763 rte_strerror(EINVAL));
2765 case TM_NODE_LEVEL_SUBPORT:
2766 return -rte_tm_error_set(error,
2768 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2770 rte_strerror(EINVAL));
2772 case TM_NODE_LEVEL_PIPE:
2773 #ifdef RTE_SCHED_SUBPORT_TC_OV
2774 if (update_pipe_weight(dev, n, weight))
2775 return -rte_tm_error_set(error,
2777 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2779 rte_strerror(EINVAL));
2782 return -rte_tm_error_set(error,
2784 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2786 rte_strerror(EINVAL));
2789 case TM_NODE_LEVEL_TC:
2790 return -rte_tm_error_set(error,
2792 RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2794 rte_strerror(EINVAL));
2796 case TM_NODE_LEVEL_QUEUE:
2799 if (update_queue_weight(dev, n, weight))
2800 return -rte_tm_error_set(error,
2802 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2804 rte_strerror(EINVAL));
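/* Illustration only (not part of the driver): per the checks in
 * pmd_tm_node_parent_update() above, only the WFQ weight may actually
 * change; parent node id and priority must be passed back unchanged.
 * pipe_node_id and subport_node_id are hypothetical application-chosen
 * ids.
 */
#if 0	/* sketch, intentionally excluded from the build */
static int
example_set_pipe_weight(uint16_t port_id, uint32_t pipe_node_id,
	uint32_t subport_node_id, uint32_t new_weight)
{
	struct rte_tm_error error;

	/* Same parent, same priority (0), new weight in 1 .. 254. */
	return rte_tm_node_parent_update(port_id, pipe_node_id,
		subport_node_id, 0, new_weight, &error);
}
#endif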
2810 update_subport_rate(struct rte_eth_dev *dev,
2812 struct tm_shaper_profile *sp)
2814 struct pmd_internals *p = dev->data->dev_private;
2815 uint32_t subport_id = tm_node_subport_id(dev, ns);
2817 struct rte_sched_subport_params subport_params;
2819 /* Derive new subport configuration. */
2820 memcpy(&subport_params,
2821 &p->soft.tm.params.subport_params[subport_id],
2822 sizeof(subport_params));
2823 subport_params.tb_rate = sp->params.peak.rate;
2824 subport_params.tb_size = sp->params.peak.size;
2826 /* Update the subport configuration. */
2827 if (rte_sched_subport_config(p->soft.tm.sched, subport_id,
2831 /* Commit changes. */
2832 ns->shaper_profile->n_users--;
2834 ns->shaper_profile = sp;
2835 ns->params.shaper_profile_id = sp->shaper_profile_id;
2838 memcpy(&p->soft.tm.params.subport_params[subport_id],
2840 sizeof(subport_params));
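/* Note: unlike the pipe/TC helpers below, a subport rate change does not
 * depend on any pre-existing profile: rte_sched_subport_config() can be
 * called again at runtime, so only the token bucket rate/size of the
 * existing subport configuration are swapped here.
 */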
2846 update_pipe_rate(struct rte_eth_dev *dev,
2848 struct tm_shaper_profile *sp)
2850 struct pmd_internals *p = dev->data->dev_private;
2851 uint32_t pipe_id = tm_node_pipe_id(dev, np);
2853 struct tm_node *ns = np->parent_node;
2854 uint32_t subport_id = tm_node_subport_id(dev, ns);
2856 struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2857 struct rte_sched_pipe_params profile1;
2858 uint32_t pipe_profile_id;
2860 /* Derive new pipe profile. */
2861 memcpy(&profile1, profile0, sizeof(profile1));
2862 profile1.tb_rate = sp->params.peak.rate;
2863 profile1.tb_size = sp->params.peak.size;
2865 /* Since implementation does not allow adding more pipe profiles after
2866 * port configuration, the pipe configuration can be successfully
2867 * updated only if the new profile is also part of the existing set of
2868 * pipe profiles.
2869 */
2870 if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2873 /* Update the pipe profile used by the current pipe. */
2874 if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2875 (int32_t)pipe_profile_id))
2878 /* Commit changes. */
2879 pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2880 np->shaper_profile->n_users--;
2881 np->shaper_profile = sp;
2882 np->params.shaper_profile_id = sp->shaper_profile_id;
2889 update_tc_rate(struct rte_eth_dev *dev,
2891 struct tm_shaper_profile *sp)
2893 struct pmd_internals *p = dev->data->dev_private;
2894 uint32_t tc_id = tm_node_tc_id(dev, nt);
2896 struct tm_node *np = nt->parent_node;
2897 uint32_t pipe_id = tm_node_pipe_id(dev, np);
2899 struct tm_node *ns = np->parent_node;
2900 uint32_t subport_id = tm_node_subport_id(dev, ns);
2902 struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2903 struct rte_sched_pipe_params profile1;
2904 uint32_t pipe_profile_id;
2906 /* Derive new pipe profile. */
2907 memcpy(&profile1, profile0, sizeof(profile1));
2908 profile1.tc_rate[tc_id] = sp->params.peak.rate;
2910 /* Since implementation does not allow adding more pipe profiles after
2911 * port configuration, the pipe configuration can be successfully
2912 * updated only if the new profile is also part of the existing set of
2913 * pipe profiles.
2914 */
2915 if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2918 /* Update the pipe profile used by the current pipe. */
2919 if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2920 (int32_t)pipe_profile_id))
2923 /* Commit changes. */
2924 pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2925 nt->shaper_profile->n_users--;
2926 nt->shaper_profile = sp;
2927 nt->params.shaper_profile_id = sp->shaper_profile_id;
2933 /* Traffic manager node shaper update */
2935 pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
2937 uint32_t shaper_profile_id,
2938 struct rte_tm_error *error)
2941 struct tm_shaper_profile *sp;
2943 /* Port must be started and TM used. */
2944 if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
2945 return -rte_tm_error_set(error,
2947 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2949 rte_strerror(EBUSY));
2951 /* Node must be valid */
2952 n = tm_node_search(dev, node_id);
2954 return -rte_tm_error_set(error,
2956 RTE_TM_ERROR_TYPE_NODE_ID,
2958 rte_strerror(EINVAL));
2960 /* Shaper profile must be valid. */
2961 sp = tm_shaper_profile_search(dev, shaper_profile_id);
2963 return -rte_tm_error_set(error,
2965 RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
2967 rte_strerror(EINVAL));
2970 case TM_NODE_LEVEL_PORT:
2971 return -rte_tm_error_set(error,
2973 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2975 rte_strerror(EINVAL));
2977 case TM_NODE_LEVEL_SUBPORT:
2978 if (update_subport_rate(dev, n, sp))
2979 return -rte_tm_error_set(error,
2981 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2983 rte_strerror(EINVAL));
2986 case TM_NODE_LEVEL_PIPE:
2987 if (update_pipe_rate(dev, n, sp))
2988 return -rte_tm_error_set(error,
2990 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2992 rte_strerror(EINVAL));
2995 case TM_NODE_LEVEL_TC:
2996 if (update_tc_rate(dev, n, sp))
2997 return -rte_tm_error_set(error,
2999 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3001 rte_strerror(EINVAL));
3004 case TM_NODE_LEVEL_QUEUE:
3007 return -rte_tm_error_set(error,
3009 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3011 rte_strerror(EINVAL));
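/* Illustration only (not part of the driver): runtime rate change through
 * the generic API. Because of the pipe profile restriction noted earlier,
 * this is only guaranteed to take effect for subport-level nodes; for
 * pipe/TC nodes the new rate must already exist in a committed pipe
 * profile. The profile id, node id and bucket size below are hypothetical
 * values, subject to this driver's shaper profile checks.
 */
#if 0	/* sketch, intentionally excluded from the build */
static int
example_set_subport_rate(uint16_t port_id, uint32_t subport_node_id,
	uint64_t rate_bytes_per_sec)
{
	struct rte_tm_shaper_params sp;
	struct rte_tm_error error;
	uint32_t profile_id = 1000; /* hypothetical, must be unused so far */

	memset(&sp, 0, sizeof(sp));
	sp.peak.rate = rate_bytes_per_sec;
	sp.peak.size = 1000000; /* token bucket size, bytes */
	sp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;

	if (rte_tm_shaper_profile_add(port_id, profile_id, &sp, &error))
		return -1;

	return rte_tm_node_shaper_update(port_id, subport_node_id,
		profile_id, &error);
}
#endif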
3015 static inline uint32_t
3016 tm_port_queue_id(struct rte_eth_dev *dev,
3017 uint32_t port_subport_id,
3018 uint32_t subport_pipe_id,
3019 uint32_t pipe_tc_id,
3020 uint32_t tc_queue_id)
3022 struct pmd_internals *p = dev->data->dev_private;
3023 struct tm_hierarchy *h = &p->soft.tm.h;
3024 uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
3025 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3027 uint32_t port_pipe_id =
3028 port_subport_id * n_pipes_per_subport + subport_pipe_id;
3029 uint32_t port_tc_id =
3030 port_pipe_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + pipe_tc_id;
3031 uint32_t port_queue_id =
3032 port_tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_queue_id;
3034 return port_queue_id;
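/* Worked example (hypothetical sizes): with 4096 pipes per subport,
 * subport 1 / pipe 3 / TC 2 / queue 1 maps to
 *   port_pipe_id  = 1 * 4096 + 3  = 4099
 *   port_tc_id    = 4099 * 4 + 2  = 16398
 *   port_queue_id = 16398 * 4 + 1 = 65593
 * i.e. the TM node coordinates are flattened into the linear queue id
 * space that librte_sched expects.
 */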
3038 read_port_stats(struct rte_eth_dev *dev,
3040 struct rte_tm_node_stats *stats,
3041 uint64_t *stats_mask,
3044 struct pmd_internals *p = dev->data->dev_private;
3045 struct tm_hierarchy *h = &p->soft.tm.h;
3046 uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3047 uint32_t subport_id;
3049 for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
3050 struct rte_sched_subport_stats s;
3054 int status = rte_sched_subport_read_stats(
3062 /* Stats accumulate */
3063 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
3064 nr->stats.n_pkts +=
3065 s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
3066 nr->stats.n_bytes +=
3067 s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
3068 nr->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
3069 s.n_pkts_tc_dropped[id];
3070 nr->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3071 s.n_bytes_tc_dropped[id];
3077 memcpy(stats, &nr->stats, sizeof(*stats));
3080 *stats_mask = STATS_MASK_DEFAULT;
3084 memset(&nr->stats, 0, sizeof(nr->stats));
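/* Note: nr->stats accumulates across calls and is only reset when the
 * caller passes a non-zero clear flag, so repeated reads with clear == 0
 * return running totals. The read_*_stats() helpers below follow the same
 * pattern for their respective nodes.
 */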
3090 read_subport_stats(struct rte_eth_dev *dev,
3092 struct rte_tm_node_stats *stats,
3093 uint64_t *stats_mask,
3096 struct pmd_internals *p = dev->data->dev_private;
3097 uint32_t subport_id = tm_node_subport_id(dev, ns);
3098 struct rte_sched_subport_stats s;
3099 uint32_t tc_ov, tc_id;
3102 int status = rte_sched_subport_read_stats(
3110 /* Stats accumulate */
3111 for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
3112 ns->stats.n_pkts +=
3113 s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
3114 ns->stats.n_bytes +=
3115 s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
3116 ns->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
3117 s.n_pkts_tc_dropped[tc_id];
3118 ns->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3119 s.n_bytes_tc_dropped[tc_id];
3124 memcpy(stats, &ns->stats, sizeof(*stats));
3127 *stats_mask = STATS_MASK_DEFAULT;
3131 memset(&ns->stats, 0, sizeof(ns->stats));
3137 read_pipe_stats(struct rte_eth_dev *dev,
3139 struct rte_tm_node_stats *stats,
3140 uint64_t *stats_mask,
3143 struct pmd_internals *p = dev->data->dev_private;
3145 uint32_t pipe_id = tm_node_pipe_id(dev, np);
3147 struct tm_node *ns = np->parent_node;
3148 uint32_t subport_id = tm_node_subport_id(dev, ns);
3153 for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
3154 struct rte_sched_queue_stats s;
3157 uint32_t qid = tm_port_queue_id(dev,
3158 subport_id,
3159 pipe_id,
3160 i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
3161 i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
3163 int status = rte_sched_queue_read_stats(
3171 /* Stats accumulate */
3172 np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3173 np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3174 np->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
3175 np->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3177 np->stats.leaf.n_pkts_queued = qlen;
3182 memcpy(stats, &np->stats, sizeof(*stats));
3185 *stats_mask = STATS_MASK_DEFAULT;
3189 memset(&np->stats, 0, sizeof(np->stats));
3195 read_tc_stats(struct rte_eth_dev *dev,
3197 struct rte_tm_node_stats *stats,
3198 uint64_t *stats_mask,
3201 struct pmd_internals *p = dev->data->dev_private;
3203 uint32_t tc_id = tm_node_tc_id(dev, nt);
3205 struct tm_node *np = nt->parent_node;
3206 uint32_t pipe_id = tm_node_pipe_id(dev, np);
3208 struct tm_node *ns = np->parent_node;
3209 uint32_t subport_id = tm_node_subport_id(dev, ns);
3214 for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
3215 struct rte_sched_queue_stats s;
3218 uint32_t qid = tm_port_queue_id(dev,
3219 subport_id,
3220 pipe_id,
3221 tc_id,
3222 i);
3224 int status = rte_sched_queue_read_stats(
3232 /* Stats accumulate */
3233 nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3234 nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3235 nt->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
3236 nt->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3238 nt->stats.leaf.n_pkts_queued = qlen;
3243 memcpy(stats, &nt->stats, sizeof(*stats));
3246 *stats_mask = STATS_MASK_DEFAULT;
3250 memset(&nt->stats, 0, sizeof(nt->stats));
3256 read_queue_stats(struct rte_eth_dev *dev,
3258 struct rte_tm_node_stats *stats,
3259 uint64_t *stats_mask,
3262 struct pmd_internals *p = dev->data->dev_private;
3263 struct rte_sched_queue_stats s;
3266 uint32_t queue_id = tm_node_queue_id(dev, nq);
3268 struct tm_node *nt = nq->parent_node;
3269 uint32_t tc_id = tm_node_tc_id(dev, nt);
3271 struct tm_node *np = nt->parent_node;
3272 uint32_t pipe_id = tm_node_pipe_id(dev, np);
3274 struct tm_node *ns = np->parent_node;
3275 uint32_t subport_id = tm_node_subport_id(dev, ns);
3278 uint32_t qid = tm_port_queue_id(dev,
3279 subport_id,
3280 pipe_id,
3281 tc_id,
3282 queue_id);
3284 int status = rte_sched_queue_read_stats(
3292 /* Stats accumulate */
3293 nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3294 nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3295 nq->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
3296 nq->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3298 nq->stats.leaf.n_pkts_queued = qlen;
3302 memcpy(stats, &nq->stats, sizeof(*stats));
3305 *stats_mask = STATS_MASK_QUEUE;
3309 memset(&nq->stats, 0, sizeof(nq->stats));
3314 /* Traffic manager read stats counters for specific node */
3316 pmd_tm_node_stats_read(struct rte_eth_dev *dev,
3318 struct rte_tm_node_stats *stats,
3319 uint64_t *stats_mask,
3321 struct rte_tm_error *error)
3325 /* Port must be started and TM used. */
3326 if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
3327 return -rte_tm_error_set(error,
3329 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3331 rte_strerror(EBUSY));
3333 /* Node must be valid */
3334 n = tm_node_search(dev, node_id);
3336 return -rte_tm_error_set(error,
3338 RTE_TM_ERROR_TYPE_NODE_ID,
3340 rte_strerror(EINVAL));
3343 case TM_NODE_LEVEL_PORT:
3344 if (read_port_stats(dev, n, stats, stats_mask, clear))
3345 return -rte_tm_error_set(error,
3347 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3349 rte_strerror(EINVAL));
3352 case TM_NODE_LEVEL_SUBPORT:
3353 if (read_subport_stats(dev, n, stats, stats_mask, clear))
3354 return -rte_tm_error_set(error,
3356 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3358 rte_strerror(EINVAL));
3361 case TM_NODE_LEVEL_PIPE:
3362 if (read_pipe_stats(dev, n, stats, stats_mask, clear))
3363 return -rte_tm_error_set(error,
3365 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3367 rte_strerror(EINVAL));
3370 case TM_NODE_LEVEL_TC:
3371 if (read_tc_stats(dev, n, stats, stats_mask, clear))
3372 return -rte_tm_error_set(error,
3374 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3376 rte_strerror(EINVAL));
3379 case TM_NODE_LEVEL_QUEUE:
3381 if (read_queue_stats(dev, n, stats, stats_mask, clear))
3382 return -rte_tm_error_set(error,
3384 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3386 rte_strerror(EINVAL));
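/* Illustration only (not part of the driver): reading leaf (queue) node
 * counters through the generic API. stats_mask reports which fields the
 * driver filled in; for queue nodes this driver advertises STATS_MASK_QUEUE.
 * node_id is a hypothetical application-chosen id.
 */
#if 0	/* sketch, intentionally excluded from the build */
static int
example_read_queue_stats(uint16_t port_id, uint32_t node_id,
	uint64_t *n_pkts, uint64_t *n_pkts_dropped)
{
	struct rte_tm_node_stats stats;
	struct rte_tm_error error;
	uint64_t mask = 0;

	/* clear = 1: zero the node counters once they have been read. */
	if (rte_tm_node_stats_read(port_id, node_id, &stats, &mask, 1, &error))
		return -1;

	if (mask & RTE_TM_STATS_N_PKTS)
		*n_pkts = stats.n_pkts;
	if (mask & RTE_TM_STATS_N_PKTS_GREEN_DROPPED)
		*n_pkts_dropped = stats.leaf.n_pkts_dropped[RTE_TM_GREEN];

	return 0;
}
#endif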
3391 const struct rte_tm_ops pmd_tm_ops = {
3392 .node_type_get = pmd_tm_node_type_get,
3393 .capabilities_get = pmd_tm_capabilities_get,
3394 .level_capabilities_get = pmd_tm_level_capabilities_get,
3395 .node_capabilities_get = pmd_tm_node_capabilities_get,
3397 .wred_profile_add = pmd_tm_wred_profile_add,
3398 .wred_profile_delete = pmd_tm_wred_profile_delete,
3399 .shared_wred_context_add_update = NULL,
3400 .shared_wred_context_delete = NULL,
3402 .shaper_profile_add = pmd_tm_shaper_profile_add,
3403 .shaper_profile_delete = pmd_tm_shaper_profile_delete,
3404 .shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
3405 .shared_shaper_delete = pmd_tm_shared_shaper_delete,
3407 .node_add = pmd_tm_node_add,
3408 .node_delete = pmd_tm_node_delete,
3409 .node_suspend = NULL,
3410 .node_resume = NULL,
3411 .hierarchy_commit = pmd_tm_hierarchy_commit,
3413 .node_parent_update = pmd_tm_node_parent_update,
3414 .node_shaper_update = pmd_tm_node_shaper_update,
3415 .node_shared_shaper_update = NULL,
3416 .node_stats_update = NULL,
3417 .node_wfq_weight_mode_update = NULL,
3418 .node_cman_update = NULL,
3419 .node_wred_context_update = NULL,
3420 .node_shared_wred_context_update = NULL,
3422 .node_stats_read = pmd_tm_node_stats_read,