/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_malloc.h>
#include <rte_string_fns.h>

#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"

#define SUBPORT_TC_PERIOD 10
#define PIPE_TC_PERIOD 40

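/*
 * Overview (editor's note): this file maps the generic rte_tm hierarchy onto
 * the librte_sched port/subport/pipe/TC/queue model. A sketch of the
 * five-level tree the code below expects, assuming the scheduler constants
 * of this era (13 traffic classes per pipe, 4 best-effort queues):
 *
 *   port (root, level 0)
 *     +-- subport (level 1)
 *           +-- pipe (level 2)
 *                 +-- TC 0..12 (level 3, strict priority)
 *                       +-- queue(s) (level 4: 1 per SP TC, 4 for the BE TC)
 *
 * SUBPORT_TC_PERIOD and PIPE_TC_PERIOD above are the traffic-class credit
 * update periods (in milliseconds) handed to the scheduler at commit time.
 */
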
int
softnic_tmgr_init(struct pmd_internals *p)
{
    TAILQ_INIT(&p->tmgr_port_list);

    return 0;
}

void
softnic_tmgr_free(struct pmd_internals *p)
{
    /* Remove and free every traffic manager port on the list. */
    for ( ; ; ) {
        struct softnic_tmgr_port *tmgr_port;

        tmgr_port = TAILQ_FIRST(&p->tmgr_port_list);
        if (tmgr_port == NULL)
            break;

        TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
        rte_sched_port_free(tmgr_port->s);
        free(tmgr_port);
    }
}

struct softnic_tmgr_port *
softnic_tmgr_port_find(struct pmd_internals *p,
    const char *name)
{
    struct softnic_tmgr_port *tmgr_port;

    if (name == NULL)
        return NULL;

    TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node)
        if (strcmp(tmgr_port->name, name) == 0)
            return tmgr_port;

    return NULL;
}

struct softnic_tmgr_port *
softnic_tmgr_port_create(struct pmd_internals *p,
    const char *name)
{
    struct softnic_tmgr_port *tmgr_port;
    struct tm_params *t = &p->soft.tm.params;
    struct rte_sched_port *sched;
    uint32_t n_subports, subport_id;

    /* Check input params */
    if (name == NULL ||
        softnic_tmgr_port_find(p, name))
        return NULL;

    /* Is hierarchy frozen? */
    if (p->soft.tm.hierarchy_frozen == 0)
        return NULL;

    /* Port */
    sched = rte_sched_port_config(&t->port_params);
    if (sched == NULL)
        return NULL;

    /* Subport */
    n_subports = t->port_params.n_subports_per_port;
    for (subport_id = 0; subport_id < n_subports; subport_id++) {
        uint32_t n_pipes_per_subport = t->port_params.n_pipes_per_subport;
        uint32_t pipe_id;
        int status;

        status = rte_sched_subport_config(sched,
            subport_id,
            &t->subport_params[subport_id]);
        if (status) {
            rte_sched_port_free(sched);
            return NULL;
        }

        /* Pipe */
        for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
            int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id;
            int profile_id = t->pipe_to_profile[pos];

            if (profile_id < 0)
                continue;

            status = rte_sched_pipe_config(sched,
                subport_id,
                pipe_id,
                profile_id);
            if (status) {
                rte_sched_port_free(sched);
                return NULL;
            }
        }
    }

    /* Node allocation */
    tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port));
    if (tmgr_port == NULL) {
        rte_sched_port_free(sched);
        return NULL;
    }

    /* Node fill in */
    strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
    tmgr_port->s = sched;

    /* Node add to list */
    TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node);

    return tmgr_port;
}

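/*
 * Usage sketch (illustrative only, not part of the driver): once the
 * application has committed the rte_tm hierarchy, a scheduler instance can
 * be materialized and later looked up by name. The "TMGR" name is the
 * convention assumed by SCHED() below; error handling omitted.
 *
 *    struct softnic_tmgr_port *port;
 *
 *    port = softnic_tmgr_port_create(p, "TMGR");
 *    if (port != NULL)
 *        port = softnic_tmgr_port_find(p, "TMGR");
 */
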
static struct rte_sched_port *
SCHED(struct pmd_internals *p)
{
    struct softnic_tmgr_port *tmgr_port;

    tmgr_port = softnic_tmgr_port_find(p, "TMGR");
    if (tmgr_port == NULL)
        return NULL;

    return tmgr_port->s;
}

void
tm_hierarchy_init(struct pmd_internals *p)
{
    memset(&p->soft.tm, 0, sizeof(p->soft.tm));

    /* Initialize shaper profile list */
    TAILQ_INIT(&p->soft.tm.h.shaper_profiles);

    /* Initialize shared shaper list */
    TAILQ_INIT(&p->soft.tm.h.shared_shapers);

    /* Initialize wred profile list */
    TAILQ_INIT(&p->soft.tm.h.wred_profiles);

    /* Initialize TM node list */
    TAILQ_INIT(&p->soft.tm.h.nodes);
}

void
tm_hierarchy_free(struct pmd_internals *p)
{
    /* Remove all nodes */
    for ( ; ; ) {
        struct tm_node *tm_node;

        tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
        if (tm_node == NULL)
            break;

        TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
        free(tm_node);
    }

    /* Remove all WRED profiles */
    for ( ; ; ) {
        struct tm_wred_profile *wred_profile;

        wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
        if (wred_profile == NULL)
            break;

        TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
        free(wred_profile);
    }

    /* Remove all shared shapers */
    for ( ; ; ) {
        struct tm_shared_shaper *shared_shaper;

        shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
        if (shared_shaper == NULL)
            break;

        TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
        free(shared_shaper);
    }

    /* Remove all shaper profiles */
    for ( ; ; ) {
        struct tm_shaper_profile *shaper_profile;

        shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
        if (shaper_profile == NULL)
            break;

        TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
            shaper_profile, node);
        free(shaper_profile);
    }

    tm_hierarchy_init(p);
}

static struct tm_shaper_profile *
tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
    struct tm_shaper_profile *sp;

    TAILQ_FOREACH(sp, spl, node)
        if (shaper_profile_id == sp->shaper_profile_id)
            return sp;

    return NULL;
}

static struct tm_shared_shaper *
tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
    struct tm_shared_shaper *ss;

    TAILQ_FOREACH(ss, ssl, node)
        if (shared_shaper_id == ss->shared_shaper_id)
            return ss;

    return NULL;
}

static struct tm_wred_profile *
tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
    struct tm_wred_profile *wp;

    TAILQ_FOREACH(wp, wpl, node)
        if (wred_profile_id == wp->wred_profile_id)
            return wp;

    return NULL;
}

static struct tm_node *
tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_node_list *nl = &p->soft.tm.h.nodes;
    struct tm_node *n;

    TAILQ_FOREACH(n, nl, node)
        if (n->node_id == node_id)
            return n;

    return NULL;
}

static struct tm_node *
tm_root_node_present(struct rte_eth_dev *dev)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_node_list *nl = &p->soft.tm.h.nodes;
    struct tm_node *n;

    TAILQ_FOREACH(n, nl, node)
        if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
            return n;

    return NULL;
}

static uint32_t
tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_node_list *nl = &p->soft.tm.h.nodes;
    struct tm_node *ns;
    uint32_t subport_id;

    subport_id = 0;
    TAILQ_FOREACH(ns, nl, node) {
        if (ns->level != TM_NODE_LEVEL_SUBPORT)
            continue;

        if (ns->node_id == subport_node->node_id)
            return subport_id;

        subport_id++;
    }

    return UINT32_MAX;
}

static uint32_t
tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_node_list *nl = &p->soft.tm.h.nodes;
    struct tm_node *np;
    uint32_t pipe_id;

    pipe_id = 0;
    TAILQ_FOREACH(np, nl, node) {
        if (np->level != TM_NODE_LEVEL_PIPE ||
            np->parent_node_id != pipe_node->parent_node_id)
            continue;

        if (np->node_id == pipe_node->node_id)
            return pipe_id;

        pipe_id++;
    }

    return UINT32_MAX;
}

static uint32_t
tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
{
    return tc_node->priority;
}

static uint32_t
tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_node_list *nl = &p->soft.tm.h.nodes;
    struct tm_node *nq;
    uint32_t queue_id;

    queue_id = 0;
    TAILQ_FOREACH(nq, nl, node) {
        if (nq->level != TM_NODE_LEVEL_QUEUE ||
            nq->parent_node_id != queue_node->parent_node_id)
            continue;

        if (nq->node_id == queue_node->node_id)
            return queue_id;

        queue_id++;
    }

    return UINT32_MAX;
}

static uint32_t
tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
{
    struct pmd_internals *p = dev->data->dev_private;
    uint32_t n_queues_max = p->params.tm.n_queues;
    uint32_t n_tc_max =
        (n_queues_max * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
        / RTE_SCHED_QUEUES_PER_PIPE;
    uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
    uint32_t n_subports_max = n_pipes_max;
    uint32_t n_root_max = 1;

    switch (level) {
    case TM_NODE_LEVEL_PORT:
        return n_root_max;
    case TM_NODE_LEVEL_SUBPORT:
        return n_subports_max;
    case TM_NODE_LEVEL_PIPE:
        return n_pipes_max;
    case TM_NODE_LEVEL_TC:
        return n_tc_max;
    case TM_NODE_LEVEL_QUEUE:
    default:
        return n_queues_max;
    }
}

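/*
 * Worked example (editor's note, assuming RTE_SCHED_QUEUES_PER_PIPE = 16 and
 * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE = 13): with n_queues_max = 4096,
 * n_tc_max = 4096 * 13 / 16 = 3328, n_pipes_max = 3328 / 13 = 256, and
 * n_subports_max = 256. Every per-level cap reported below is thus derived
 * from the single n_queues configuration parameter.
 */
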
/* Traffic manager node type get */
static int
pmd_tm_node_type_get(struct rte_eth_dev *dev,
    uint32_t node_id,
    int *is_leaf,
    struct rte_tm_error *error)
{
    struct pmd_internals *p = dev->data->dev_private;

    if (is_leaf == NULL)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL, rte_strerror(EINVAL));

    if (node_id == RTE_TM_NODE_ID_NULL ||
        (tm_node_search(dev, node_id) == NULL))
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_ID, NULL, rte_strerror(EINVAL));

    *is_leaf = node_id < p->params.tm.n_queues;

    return 0;
}

#ifdef RTE_SCHED_RED
#define WRED_SUPPORTED 1
#else
#define WRED_SUPPORTED 0
#endif

#define STATS_MASK_DEFAULT \
    (RTE_TM_STATS_N_PKTS | \
    RTE_TM_STATS_N_BYTES | \
    RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
    RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

#define STATS_MASK_QUEUE \
    (STATS_MASK_DEFAULT | \
    RTE_TM_STATS_N_PKTS_QUEUED)

static const struct rte_tm_capabilities tm_cap = {
    .n_nodes_max = UINT32_MAX,
    .n_levels_max = TM_NODE_LEVEL_MAX,

    .non_leaf_nodes_identical = 0,
    .leaf_nodes_identical = 1,

    .shaper_n_max = UINT32_MAX,
    .shaper_private_n_max = UINT32_MAX,
    .shaper_private_dual_rate_n_max = 0,
    .shaper_private_rate_min = 1,
    .shaper_private_rate_max = UINT32_MAX,

    .shaper_shared_n_max = UINT32_MAX,
    .shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
    .shaper_shared_n_shapers_per_node_max = 1,
    .shaper_shared_dual_rate_n_max = 0,
    .shaper_shared_rate_min = 1,
    .shaper_shared_rate_max = UINT32_MAX,

    .shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
    .shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,

    .sched_n_children_max = UINT32_MAX,
    .sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
    .sched_wfq_n_children_per_group_max = UINT32_MAX,
    .sched_wfq_n_groups_max = 1,
    .sched_wfq_weight_max = UINT32_MAX,

    .cman_wred_packet_mode_supported = WRED_SUPPORTED,
    .cman_wred_byte_mode_supported = 0,
    .cman_head_drop_supported = 0,
    .cman_wred_context_n_max = 0,
    .cman_wred_context_private_n_max = 0,
    .cman_wred_context_shared_n_max = 0,
    .cman_wred_context_shared_n_nodes_per_context_max = 0,
    .cman_wred_context_shared_n_contexts_per_node_max = 0,

    .mark_vlan_dei_supported = {0, 0, 0},
    .mark_ip_ecn_tcp_supported = {0, 0, 0},
    .mark_ip_ecn_sctp_supported = {0, 0, 0},
    .mark_ip_dscp_supported = {0, 0, 0},

    .dynamic_update_mask = 0,

    .stats_mask = STATS_MASK_QUEUE,
};

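/*
 * Illustrative call sequence (application side, not part of the driver):
 * these capabilities are read through the generic rte_tm API, e.g.:
 *
 *    struct rte_tm_capabilities cap;
 *    struct rte_tm_error err;
 *
 *    if (rte_tm_capabilities_get(port_id, &cap, &err) == 0)
 *        printf("max nodes: %u\n", cap.n_nodes_max);
 *
 * where port_id is the application's ethdev port number.
 */
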
/* Traffic manager capabilities get */
static int
pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused,
    struct rte_tm_capabilities *cap,
    struct rte_tm_error *error)
{
    if (cap == NULL)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_CAPABILITIES, NULL, rte_strerror(EINVAL));

    memcpy(cap, &tm_cap, sizeof(*cap));

    cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

    cap->shaper_private_n_max =
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);

    cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);

    cap->shaper_n_max = cap->shaper_private_n_max +
        cap->shaper_shared_n_max;

    cap->shaper_shared_n_nodes_per_shaper_max =
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);

    cap->sched_n_children_max = RTE_MAX(
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
        (uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);

    cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;

    if (WRED_SUPPORTED)
        cap->cman_wred_context_private_n_max =
            tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

    cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
        cap->cman_wred_context_shared_n_max;

    return 0;
}

static const struct rte_tm_level_capabilities tm_level_cap[] = {
    [TM_NODE_LEVEL_PORT] = {
        .n_nodes_max = 1,
        .n_nodes_nonleaf_max = 1,
        .n_nodes_leaf_max = 0,
        .non_leaf_nodes_identical = 1,
        .leaf_nodes_identical = 0,

        {.nonleaf = {
            .shaper_private_supported = 1,
            .shaper_private_dual_rate_supported = 0,
            .shaper_private_rate_min = 1,
            .shaper_private_rate_max = UINT32_MAX,
            .shaper_shared_n_max = 0,

            .sched_n_children_max = UINT32_MAX,
            .sched_sp_n_priorities_max = 1,
            .sched_wfq_n_children_per_group_max = UINT32_MAX,
            .sched_wfq_n_groups_max = 1,
            .sched_wfq_weight_max = 1,

            .stats_mask = STATS_MASK_DEFAULT,
        } },
    },

    [TM_NODE_LEVEL_SUBPORT] = {
        .n_nodes_max = UINT32_MAX,
        .n_nodes_nonleaf_max = UINT32_MAX,
        .n_nodes_leaf_max = 0,
        .non_leaf_nodes_identical = 1,
        .leaf_nodes_identical = 0,

        {.nonleaf = {
            .shaper_private_supported = 1,
            .shaper_private_dual_rate_supported = 0,
            .shaper_private_rate_min = 1,
            .shaper_private_rate_max = UINT32_MAX,
            .shaper_shared_n_max = 0,

            .sched_n_children_max = UINT32_MAX,
            .sched_sp_n_priorities_max = 1,
            .sched_wfq_n_children_per_group_max = UINT32_MAX,
            .sched_wfq_n_groups_max = 1,
#ifdef RTE_SCHED_SUBPORT_TC_OV
            .sched_wfq_weight_max = UINT32_MAX,
#else
            .sched_wfq_weight_max = 1,
#endif

            .stats_mask = STATS_MASK_DEFAULT,
        } },
    },

    [TM_NODE_LEVEL_PIPE] = {
        .n_nodes_max = UINT32_MAX,
        .n_nodes_nonleaf_max = UINT32_MAX,
        .n_nodes_leaf_max = 0,
        .non_leaf_nodes_identical = 1,
        .leaf_nodes_identical = 0,

        {.nonleaf = {
            .shaper_private_supported = 1,
            .shaper_private_dual_rate_supported = 0,
            .shaper_private_rate_min = 1,
            .shaper_private_rate_max = UINT32_MAX,
            .shaper_shared_n_max = 0,

            .sched_n_children_max =
                RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
            .sched_sp_n_priorities_max =
                RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
            .sched_wfq_n_children_per_group_max = 1,
            .sched_wfq_n_groups_max = 0,
            .sched_wfq_weight_max = 1,

            .stats_mask = STATS_MASK_DEFAULT,
        } },
    },

    [TM_NODE_LEVEL_TC] = {
        .n_nodes_max = UINT32_MAX,
        .n_nodes_nonleaf_max = UINT32_MAX,
        .n_nodes_leaf_max = 0,
        .non_leaf_nodes_identical = 1,
        .leaf_nodes_identical = 0,

        {.nonleaf = {
            .shaper_private_supported = 1,
            .shaper_private_dual_rate_supported = 0,
            .shaper_private_rate_min = 1,
            .shaper_private_rate_max = UINT32_MAX,
            .shaper_shared_n_max = 1,

            .sched_n_children_max =
                RTE_SCHED_BE_QUEUES_PER_PIPE,
            .sched_sp_n_priorities_max = 1,
            .sched_wfq_n_children_per_group_max =
                RTE_SCHED_BE_QUEUES_PER_PIPE,
            .sched_wfq_n_groups_max = 1,
            .sched_wfq_weight_max = UINT32_MAX,

            .stats_mask = STATS_MASK_DEFAULT,
        } },
    },

    [TM_NODE_LEVEL_QUEUE] = {
        .n_nodes_max = UINT32_MAX,
        .n_nodes_nonleaf_max = 0,
        .n_nodes_leaf_max = UINT32_MAX,
        .non_leaf_nodes_identical = 0,
        .leaf_nodes_identical = 1,

        {.leaf = {
            .shaper_private_supported = 0,
            .shaper_private_dual_rate_supported = 0,
            .shaper_private_rate_min = 0,
            .shaper_private_rate_max = 0,
            .shaper_shared_n_max = 0,

            .cman_head_drop_supported = 0,
            .cman_wred_packet_mode_supported = WRED_SUPPORTED,
            .cman_wred_byte_mode_supported = 0,
            .cman_wred_context_private_supported = WRED_SUPPORTED,
            .cman_wred_context_shared_n_max = 0,

            .stats_mask = STATS_MASK_QUEUE,
        } },
    },
};

/* Traffic manager level capabilities get */
static int
pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
    uint32_t level_id,
    struct rte_tm_level_capabilities *cap,
    struct rte_tm_error *error)
{
    if (cap == NULL)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_CAPABILITIES, NULL, rte_strerror(EINVAL));

    if (level_id >= TM_NODE_LEVEL_MAX)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_LEVEL_ID, NULL, rte_strerror(EINVAL));

    memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));

    switch (level_id) {
    case TM_NODE_LEVEL_PORT:
        cap->nonleaf.sched_n_children_max =
            tm_level_get_max_nodes(dev,
                TM_NODE_LEVEL_SUBPORT);
        cap->nonleaf.sched_wfq_n_children_per_group_max =
            cap->nonleaf.sched_n_children_max;
        break;

    case TM_NODE_LEVEL_SUBPORT:
        cap->n_nodes_max = tm_level_get_max_nodes(dev,
            TM_NODE_LEVEL_SUBPORT);
        cap->n_nodes_nonleaf_max = cap->n_nodes_max;
        cap->nonleaf.sched_n_children_max =
            tm_level_get_max_nodes(dev,
                TM_NODE_LEVEL_PIPE);
        cap->nonleaf.sched_wfq_n_children_per_group_max =
            cap->nonleaf.sched_n_children_max;
        break;

    case TM_NODE_LEVEL_PIPE:
        cap->n_nodes_max = tm_level_get_max_nodes(dev,
            TM_NODE_LEVEL_PIPE);
        cap->n_nodes_nonleaf_max = cap->n_nodes_max;
        break;

    case TM_NODE_LEVEL_TC:
        cap->n_nodes_max = tm_level_get_max_nodes(dev,
            TM_NODE_LEVEL_TC);
        cap->n_nodes_nonleaf_max = cap->n_nodes_max;
        break;

    case TM_NODE_LEVEL_QUEUE:
    default:
        cap->n_nodes_max = tm_level_get_max_nodes(dev,
            TM_NODE_LEVEL_QUEUE);
        cap->n_nodes_leaf_max = cap->n_nodes_max;
        break;
    }

    return 0;
}

static const struct rte_tm_node_capabilities tm_node_cap[] = {
    [TM_NODE_LEVEL_PORT] = {
        .shaper_private_supported = 1,
        .shaper_private_dual_rate_supported = 0,
        .shaper_private_rate_min = 1,
        .shaper_private_rate_max = UINT32_MAX,
        .shaper_shared_n_max = 0,

        {.nonleaf = {
            .sched_n_children_max = UINT32_MAX,
            .sched_sp_n_priorities_max = 1,
            .sched_wfq_n_children_per_group_max = UINT32_MAX,
            .sched_wfq_n_groups_max = 1,
            .sched_wfq_weight_max = 1,
        } },

        .stats_mask = STATS_MASK_DEFAULT,
    },

    [TM_NODE_LEVEL_SUBPORT] = {
        .shaper_private_supported = 1,
        .shaper_private_dual_rate_supported = 0,
        .shaper_private_rate_min = 1,
        .shaper_private_rate_max = UINT32_MAX,
        .shaper_shared_n_max = 0,

        {.nonleaf = {
            .sched_n_children_max = UINT32_MAX,
            .sched_sp_n_priorities_max = 1,
            .sched_wfq_n_children_per_group_max = UINT32_MAX,
            .sched_wfq_n_groups_max = 1,
            .sched_wfq_weight_max = UINT32_MAX,
        } },

        .stats_mask = STATS_MASK_DEFAULT,
    },

    [TM_NODE_LEVEL_PIPE] = {
        .shaper_private_supported = 1,
        .shaper_private_dual_rate_supported = 0,
        .shaper_private_rate_min = 1,
        .shaper_private_rate_max = UINT32_MAX,
        .shaper_shared_n_max = 0,

        {.nonleaf = {
            .sched_n_children_max =
                RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
            .sched_sp_n_priorities_max =
                RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
            .sched_wfq_n_children_per_group_max = 1,
            .sched_wfq_n_groups_max = 0,
            .sched_wfq_weight_max = 1,
        } },

        .stats_mask = STATS_MASK_DEFAULT,
    },

    [TM_NODE_LEVEL_TC] = {
        .shaper_private_supported = 1,
        .shaper_private_dual_rate_supported = 0,
        .shaper_private_rate_min = 1,
        .shaper_private_rate_max = UINT32_MAX,
        .shaper_shared_n_max = 1,

        {.nonleaf = {
            .sched_n_children_max =
                RTE_SCHED_BE_QUEUES_PER_PIPE,
            .sched_sp_n_priorities_max = 1,
            .sched_wfq_n_children_per_group_max =
                RTE_SCHED_BE_QUEUES_PER_PIPE,
            .sched_wfq_n_groups_max = 1,
            .sched_wfq_weight_max = UINT32_MAX,
        } },

        .stats_mask = STATS_MASK_DEFAULT,
    },

    [TM_NODE_LEVEL_QUEUE] = {
        .shaper_private_supported = 0,
        .shaper_private_dual_rate_supported = 0,
        .shaper_private_rate_min = 0,
        .shaper_private_rate_max = 0,
        .shaper_shared_n_max = 0,

        {.leaf = {
            .cman_head_drop_supported = 0,
            .cman_wred_packet_mode_supported = WRED_SUPPORTED,
            .cman_wred_byte_mode_supported = 0,
            .cman_wred_context_private_supported = WRED_SUPPORTED,
            .cman_wred_context_shared_n_max = 0,
        } },

        .stats_mask = STATS_MASK_QUEUE,
    },
};

/* Traffic manager node capabilities get */
static int
pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
    uint32_t node_id,
    struct rte_tm_node_capabilities *cap,
    struct rte_tm_error *error)
{
    struct tm_node *tm_node;

    if (cap == NULL)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_CAPABILITIES, NULL, rte_strerror(EINVAL));

    tm_node = tm_node_search(dev, node_id);
    if (tm_node == NULL)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_ID, NULL, rte_strerror(EINVAL));

    memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));

    switch (tm_node->level) {
    case TM_NODE_LEVEL_PORT:
        cap->nonleaf.sched_n_children_max =
            tm_level_get_max_nodes(dev,
                TM_NODE_LEVEL_SUBPORT);
        cap->nonleaf.sched_wfq_n_children_per_group_max =
            cap->nonleaf.sched_n_children_max;
        break;

    case TM_NODE_LEVEL_SUBPORT:
        cap->nonleaf.sched_n_children_max =
            tm_level_get_max_nodes(dev,
                TM_NODE_LEVEL_PIPE);
        cap->nonleaf.sched_wfq_n_children_per_group_max =
            cap->nonleaf.sched_n_children_max;
        break;

    case TM_NODE_LEVEL_PIPE:
    case TM_NODE_LEVEL_TC:
    case TM_NODE_LEVEL_QUEUE:
    default:
        break;
    }

    return 0;
}

static int
shaper_profile_check(struct rte_eth_dev *dev,
    uint32_t shaper_profile_id,
    struct rte_tm_shaper_params *profile,
    struct rte_tm_error *error)
{
    struct tm_shaper_profile *sp;

    /* Shaper profile ID must not be NONE. */
    if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, NULL,
            rte_strerror(EINVAL));

    /* Shaper profile must not exist. */
    sp = tm_shaper_profile_search(dev, shaper_profile_id);
    if (sp)
        return -rte_tm_error_set(error, EEXIST,
            RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, NULL,
            rte_strerror(EEXIST));

    /* Profile must not be NULL. */
    if (profile == NULL)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_SHAPER_PROFILE, NULL,
            rte_strerror(EINVAL));

    /* Peak rate: non-zero, 32-bit */
    if (profile->peak.rate == 0 ||
        profile->peak.rate >= UINT32_MAX)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE, NULL,
            rte_strerror(EINVAL));

    /* Peak size: non-zero, 32-bit */
    if (profile->peak.size == 0 ||
        profile->peak.size >= UINT32_MAX)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE, NULL,
            rte_strerror(EINVAL));

    /* Dual-rate profiles are not supported. */
    if (profile->committed.rate != 0)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE, NULL,
            rte_strerror(EINVAL));

    /* Packet length adjust: 24 bytes */
    if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN, NULL,
            rte_strerror(EINVAL));

    return 0;
}

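/*
 * Example of a profile that passes the checks above (illustrative values
 * only): single rate, rate and bucket size that fit in 32 bits, no
 * committed (dual) rate, and the mandatory 24-byte framing/FCS adjustment.
 *
 *    struct rte_tm_shaper_params sp_params = {
 *        .committed = {.rate = 0, .size = 0},
 *        .peak = {.rate = 1250000000, .size = 1000000},
 *        .pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
 *    };
 */
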
/* Traffic manager shaper profile add */
static int
pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
    uint32_t shaper_profile_id,
    struct rte_tm_shaper_params *profile,
    struct rte_tm_error *error)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
    struct tm_shaper_profile *sp;
    int status;

    /* Check input params */
    status = shaper_profile_check(dev, shaper_profile_id, profile, error);
    if (status)
        return status;

    /* Memory allocation */
    sp = calloc(1, sizeof(struct tm_shaper_profile));
    if (sp == NULL)
        return -rte_tm_error_set(error, ENOMEM,
            RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL, rte_strerror(ENOMEM));

    /* Fill in */
    sp->shaper_profile_id = shaper_profile_id;
    memcpy(&sp->params, profile, sizeof(sp->params));

    /* Add to list */
    TAILQ_INSERT_TAIL(spl, sp, node);
    p->soft.tm.h.n_shaper_profiles++;

    return 0;
}

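/*
 * The function above is reached through the generic rte_tm API; a minimal
 * application-side call (illustrative, using the sp_params example shown
 * earlier and an arbitrary profile id of 0):
 *
 *    rte_tm_shaper_profile_add(port_id, 0, &sp_params, &err);
 */
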
/* Traffic manager shaper profile delete */
static int
pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
    uint32_t shaper_profile_id,
    struct rte_tm_error *error)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_shaper_profile *sp;

    /* Check existing */
    sp = tm_shaper_profile_search(dev, shaper_profile_id);
    if (sp == NULL)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, NULL,
            rte_strerror(EINVAL));

    /* Check unused */
    if (sp->n_users)
        return -rte_tm_error_set(error, EBUSY,
            RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, NULL,
            rte_strerror(EBUSY));

    /* Remove from list */
    TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
    p->soft.tm.h.n_shaper_profiles--;
    free(sp);

    return 0;
}

static struct tm_node *
tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
    struct tm_shared_shaper *ss)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_node_list *nl = &p->soft.tm.h.nodes;
    struct tm_node *n;

    /* Subport: each TC uses shared shaper */
    TAILQ_FOREACH(n, nl, node) {
        if (n->level != TM_NODE_LEVEL_TC ||
            n->params.n_shared_shapers == 0 ||
            n->params.shared_shaper_id[0] != ss->shared_shaper_id)
            continue;

        return n;
    }

    return NULL;
}

static int
update_subport_tc_rate(struct rte_eth_dev *dev,
    struct tm_node *nt,
    struct tm_shared_shaper *ss,
    struct tm_shaper_profile *sp_new)
{
    struct pmd_internals *p = dev->data->dev_private;
    uint32_t tc_id = tm_node_tc_id(dev, nt);

    struct tm_node *np = nt->parent_node;

    struct tm_node *ns = np->parent_node;
    uint32_t subport_id = tm_node_subport_id(dev, ns);

    struct rte_sched_subport_params subport_params;

    struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
        ss->shaper_profile_id);

    /* Derive new subport configuration. */
    memcpy(&subport_params,
        &p->soft.tm.params.subport_params[subport_id],
        sizeof(subport_params));
    subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;

    /* Update the subport configuration. */
    if (rte_sched_subport_config(SCHED(p),
        subport_id, &subport_params))
        return -1;

    /* Commit changes. */
    sp_old->n_users--;

    ss->shaper_profile_id = sp_new->shaper_profile_id;
    sp_new->n_users++;

    memcpy(&p->soft.tm.params.subport_params[subport_id],
        &subport_params,
        sizeof(subport_params));

    return 0;
}

/* Traffic manager shared shaper add/update */
static int
pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
    uint32_t shared_shaper_id,
    uint32_t shaper_profile_id,
    struct rte_tm_error *error)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_shared_shaper *ss;
    struct tm_shaper_profile *sp;
    struct tm_node *nt;

    /* Shaper profile must be valid. */
    sp = tm_shaper_profile_search(dev, shaper_profile_id);
    if (sp == NULL)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, NULL,
            rte_strerror(EINVAL));

    /**
     * Add new shared shaper
     */
    ss = tm_shared_shaper_search(dev, shared_shaper_id);
    if (ss == NULL) {
        struct tm_shared_shaper_list *ssl =
            &p->soft.tm.h.shared_shapers;

        /* Hierarchy must not be frozen */
        if (p->soft.tm.hierarchy_frozen)
            return -rte_tm_error_set(error, EBUSY,
                RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
                rte_strerror(EBUSY));

        /* Memory allocation */
        ss = calloc(1, sizeof(struct tm_shared_shaper));
        if (ss == NULL)
            return -rte_tm_error_set(error, ENOMEM,
                RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
                rte_strerror(ENOMEM));

        /* Fill in */
        ss->shared_shaper_id = shared_shaper_id;
        ss->shaper_profile_id = shaper_profile_id;

        /* Add to list */
        TAILQ_INSERT_TAIL(ssl, ss, node);
        p->soft.tm.h.n_shared_shapers++;

        return 0;
    }

    /**
     * Update existing shared shaper
     */
    /* Hierarchy must be frozen (run-time update) */
    if (p->soft.tm.hierarchy_frozen == 0)
        return -rte_tm_error_set(error, EBUSY,
            RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL, rte_strerror(EBUSY));

    /* Propagate change. */
    nt = tm_shared_shaper_get_tc(dev, ss);
    if (update_subport_tc_rate(dev, nt, ss, sp))
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL, rte_strerror(EINVAL));

    return 0;
}

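/*
 * Note (editor's): the add path is only available while the hierarchy is
 * still being built (not frozen), while the update path is only available
 * at run time (frozen), where it translates into an
 * rte_sched_subport_config() call that changes the rate of one subport TC.
 * E.g., re-pointing shared shaper 5 at shaper profile 7 after commit
 * (illustrative ids):
 *
 *    rte_tm_shared_shaper_add_update(port_id, 5, 7, &err);
 */
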
/* Traffic manager shared shaper delete */
static int
pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
    uint32_t shared_shaper_id,
    struct rte_tm_error *error)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_shared_shaper *ss;

    /* Check existing */
    ss = tm_shared_shaper_search(dev, shared_shaper_id);
    if (ss == NULL)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID, NULL,
            rte_strerror(EINVAL));

    /* Check unused */
    if (ss->n_users)
        return -rte_tm_error_set(error, EBUSY,
            RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID, NULL,
            rte_strerror(EBUSY));

    /* Remove from list */
    TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
    p->soft.tm.h.n_shared_shapers--;
    free(ss);

    return 0;
}

static int
wred_profile_check(struct rte_eth_dev *dev,
    uint32_t wred_profile_id,
    struct rte_tm_wred_params *profile,
    struct rte_tm_error *error)
{
    struct tm_wred_profile *wp;
    enum rte_color color;

    /* WRED profile ID must not be NONE. */
    if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_WRED_PROFILE_ID, NULL,
            rte_strerror(EINVAL));

    /* WRED profile must not exist. */
    wp = tm_wred_profile_search(dev, wred_profile_id);
    if (wp)
        return -rte_tm_error_set(error, EEXIST,
            RTE_TM_ERROR_TYPE_WRED_PROFILE_ID, NULL,
            rte_strerror(EEXIST));

    /* Profile must not be NULL. */
    if (profile == NULL)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_WRED_PROFILE, NULL,
            rte_strerror(EINVAL));

    /* WRED profile must be in packet mode. */
    if (profile->packet_mode == 0)
        return -rte_tm_error_set(error, ENOTSUP,
            RTE_TM_ERROR_TYPE_WRED_PROFILE, NULL,
            rte_strerror(ENOTSUP));

    /* min_th <= max_th, max_th > 0, thresholds fit in 16 bits */
    for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
        uint32_t min_th = profile->red_params[color].min_th;
        uint32_t max_th = profile->red_params[color].max_th;

        if (min_th > max_th ||
            max_th == 0 ||
            min_th > UINT16_MAX ||
            max_th > UINT16_MAX)
            return -rte_tm_error_set(error, EINVAL,
                RTE_TM_ERROR_TYPE_WRED_PROFILE, NULL,
                rte_strerror(EINVAL));
    }

    return 0;
}

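/*
 * Example of a WRED profile accepted by the checks above (illustrative
 * thresholds, one set per packet color; packet mode, 16-bit thresholds):
 *
 *    struct rte_tm_wred_params wred_params = {
 *        .red_params = {
 *            [RTE_COLOR_GREEN]  = {.min_th = 48, .max_th = 64,
 *                .maxp_inv = 10, .wq_log2 = 9},
 *            [RTE_COLOR_YELLOW] = {.min_th = 40, .max_th = 64,
 *                .maxp_inv = 10, .wq_log2 = 9},
 *            [RTE_COLOR_RED]    = {.min_th = 32, .max_th = 64,
 *                .maxp_inv = 10, .wq_log2 = 9},
 *        },
 *        .packet_mode = 1,
 *    };
 */
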
/* Traffic manager WRED profile add */
static int
pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
    uint32_t wred_profile_id,
    struct rte_tm_wred_params *profile,
    struct rte_tm_error *error)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
    struct tm_wred_profile *wp;
    int status;

    /* Check input params */
    status = wred_profile_check(dev, wred_profile_id, profile, error);
    if (status)
        return status;

    /* Memory allocation */
    wp = calloc(1, sizeof(struct tm_wred_profile));
    if (wp == NULL)
        return -rte_tm_error_set(error, ENOMEM,
            RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL, rte_strerror(ENOMEM));

    /* Fill in */
    wp->wred_profile_id = wred_profile_id;
    memcpy(&wp->params, profile, sizeof(wp->params));

    /* Add to list */
    TAILQ_INSERT_TAIL(wpl, wp, node);
    p->soft.tm.h.n_wred_profiles++;

    return 0;
}

/* Traffic manager WRED profile delete */
static int
pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
    uint32_t wred_profile_id,
    struct rte_tm_error *error)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_wred_profile *wp;

    /* Check existing */
    wp = tm_wred_profile_search(dev, wred_profile_id);
    if (wp == NULL)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_WRED_PROFILE_ID, NULL,
            rte_strerror(EINVAL));

    /* Check unused */
    if (wp->n_users)
        return -rte_tm_error_set(error, EBUSY,
            RTE_TM_ERROR_TYPE_WRED_PROFILE_ID, NULL,
            rte_strerror(EBUSY));

    /* Remove from list */
    TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
    p->soft.tm.h.n_wred_profiles--;
    free(wp);

    return 0;
}

static int
node_add_check_port(struct rte_eth_dev *dev,
    uint32_t node_id,
    uint32_t parent_node_id __rte_unused,
    uint32_t priority,
    uint32_t weight,
    uint32_t level_id __rte_unused,
    struct rte_tm_node_params *params,
    struct rte_tm_error *error)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
        params->shaper_profile_id);

    /* node type: non-leaf */
    if (node_id < p->params.tm.n_queues)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_ID, NULL, rte_strerror(EINVAL));

    /* Priority must be 0 */
    if (priority != 0)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PRIORITY, NULL, rte_strerror(EINVAL));

    /* Weight must be 1 */
    if (weight != 1)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_WEIGHT, NULL, rte_strerror(EINVAL));

    /* Shaper must be valid */
    if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
        sp == NULL)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID, NULL,
            rte_strerror(EINVAL));

    /* No shared shapers */
    if (params->n_shared_shapers != 0)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS, NULL,
            rte_strerror(EINVAL));

    /* Number of SP priorities must be 1 */
    if (params->nonleaf.n_sp_priorities != 1)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES, NULL,
            rte_strerror(EINVAL));

    /* Stats */
    if (params->stats_mask & ~STATS_MASK_DEFAULT)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS, NULL,
            rte_strerror(EINVAL));

    return 0;
}

static int
node_add_check_subport(struct rte_eth_dev *dev,
    uint32_t node_id,
    uint32_t parent_node_id __rte_unused,
    uint32_t priority,
    uint32_t weight,
    uint32_t level_id __rte_unused,
    struct rte_tm_node_params *params,
    struct rte_tm_error *error)
{
    struct pmd_internals *p = dev->data->dev_private;

    /* node type: non-leaf */
    if (node_id < p->params.tm.n_queues)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_ID, NULL, rte_strerror(EINVAL));

    /* Priority must be 0 */
    if (priority != 0)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PRIORITY, NULL, rte_strerror(EINVAL));

    /* Weight must be 1 */
    if (weight != 1)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_WEIGHT, NULL, rte_strerror(EINVAL));

    /* Shaper must be valid */
    if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
        (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID, NULL,
            rte_strerror(EINVAL));

    /* No shared shapers */
    if (params->n_shared_shapers != 0)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS, NULL,
            rte_strerror(EINVAL));

    /* Number of SP priorities must be 1 */
    if (params->nonleaf.n_sp_priorities != 1)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES, NULL,
            rte_strerror(EINVAL));

    /* Stats */
    if (params->stats_mask & ~STATS_MASK_DEFAULT)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS, NULL,
            rte_strerror(EINVAL));

    return 0;
}

static int
node_add_check_pipe(struct rte_eth_dev *dev,
    uint32_t node_id,
    uint32_t parent_node_id __rte_unused,
    uint32_t priority,
    uint32_t weight __rte_unused,
    uint32_t level_id __rte_unused,
    struct rte_tm_node_params *params,
    struct rte_tm_error *error)
{
    struct pmd_internals *p = dev->data->dev_private;

    /* node type: non-leaf */
    if (node_id < p->params.tm.n_queues)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_ID, NULL, rte_strerror(EINVAL));

    /* Priority must be 0 */
    if (priority != 0)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PRIORITY, NULL, rte_strerror(EINVAL));

    /* Shaper must be valid */
    if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
        (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID, NULL,
            rte_strerror(EINVAL));

    /* No shared shapers */
    if (params->n_shared_shapers != 0)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS, NULL,
            rte_strerror(EINVAL));

    /* Number of SP priorities must match the number of traffic classes */
    if (params->nonleaf.n_sp_priorities !=
        RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES, NULL,
            rte_strerror(EINVAL));

    /* WFQ mode must be byte mode */
    if (params->nonleaf.wfq_weight_mode != NULL &&
        params->nonleaf.wfq_weight_mode[0] != 0 &&
        params->nonleaf.wfq_weight_mode[1] != 0 &&
        params->nonleaf.wfq_weight_mode[2] != 0 &&
        params->nonleaf.wfq_weight_mode[3] != 0)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE, NULL,
            rte_strerror(EINVAL));

    /* Stats */
    if (params->stats_mask & ~STATS_MASK_DEFAULT)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS, NULL,
            rte_strerror(EINVAL));

    return 0;
}

static int
node_add_check_tc(struct rte_eth_dev *dev,
    uint32_t node_id,
    uint32_t parent_node_id __rte_unused,
    uint32_t priority __rte_unused,
    uint32_t weight,
    uint32_t level_id __rte_unused,
    struct rte_tm_node_params *params,
    struct rte_tm_error *error)
{
    struct pmd_internals *p = dev->data->dev_private;

    /* node type: non-leaf */
    if (node_id < p->params.tm.n_queues)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_ID, NULL, rte_strerror(EINVAL));

    /* Weight must be 1 */
    if (weight != 1)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_WEIGHT, NULL, rte_strerror(EINVAL));

    /* Shaper must be valid */
    if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
        (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID, NULL,
            rte_strerror(EINVAL));

    /* Single valid shared shaper */
    if (params->n_shared_shapers > 1)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS, NULL,
            rte_strerror(EINVAL));

    if (params->n_shared_shapers == 1 &&
        (params->shared_shaper_id == NULL ||
        (!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID, NULL,
            rte_strerror(EINVAL));

    /* Number of SP priorities must be 1 */
    if (params->nonleaf.n_sp_priorities != 1)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES, NULL,
            rte_strerror(EINVAL));

    /* Stats */
    if (params->stats_mask & ~STATS_MASK_DEFAULT)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS, NULL,
            rte_strerror(EINVAL));

    return 0;
}

static int
node_add_check_queue(struct rte_eth_dev *dev,
    uint32_t node_id,
    uint32_t parent_node_id __rte_unused,
    uint32_t priority,
    uint32_t weight __rte_unused,
    uint32_t level_id __rte_unused,
    struct rte_tm_node_params *params,
    struct rte_tm_error *error)
{
    struct pmd_internals *p = dev->data->dev_private;

    /* node type: leaf */
    if (node_id >= p->params.tm.n_queues)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_ID, NULL, rte_strerror(EINVAL));

    /* Priority must be 0 */
    if (priority != 0)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PRIORITY, NULL, rte_strerror(EINVAL));

    /* No private shaper on leaf nodes */
    if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID, NULL,
            rte_strerror(EINVAL));

    /* No shared shapers */
    if (params->n_shared_shapers != 0)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS, NULL,
            rte_strerror(EINVAL));

    /* Congestion management must not be head drop */
    if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN, NULL,
            rte_strerror(EINVAL));

    /* Congestion management set to WRED */
    if (params->leaf.cman == RTE_TM_CMAN_WRED) {
        uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
        struct tm_wred_profile *wp = tm_wred_profile_search(dev,
            wred_profile_id);

        /* WRED profile (for private WRED context) must be valid */
        if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
            wp == NULL)
            return -rte_tm_error_set(error, EINVAL,
                RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID, NULL,
                rte_strerror(EINVAL));

        /* No shared WRED contexts */
        if (params->leaf.wred.n_shared_wred_contexts != 0)
            return -rte_tm_error_set(error, EINVAL,
                RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
                NULL, rte_strerror(EINVAL));
    }

    /* Stats */
    if (params->stats_mask & ~STATS_MASK_QUEUE)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS, NULL,
            rte_strerror(EINVAL));

    return 0;
}

static int
node_add_check(struct rte_eth_dev *dev,
    uint32_t node_id,
    uint32_t parent_node_id,
    uint32_t priority,
    uint32_t weight,
    uint32_t level_id,
    struct rte_tm_node_params *params,
    struct rte_tm_error *error)
{
    struct tm_node *pn;
    uint32_t level;
    int status;

    /* node_id, parent_node_id:
     *    -node_id must not be RTE_TM_NODE_ID_NULL
     *    -node_id must not be in use
     *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
     *       -root node must not exist
     *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
     *       -parent_node_id must be valid
     */
    if (node_id == RTE_TM_NODE_ID_NULL)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_ID, NULL, rte_strerror(EINVAL));

    if (tm_node_search(dev, node_id))
        return -rte_tm_error_set(error, EEXIST,
            RTE_TM_ERROR_TYPE_NODE_ID, NULL, rte_strerror(EEXIST));

    if (parent_node_id == RTE_TM_NODE_ID_NULL) {
        pn = NULL;
        if (tm_root_node_present(dev))
            return -rte_tm_error_set(error, EEXIST,
                RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID, NULL,
                rte_strerror(EEXIST));
    } else {
        pn = tm_node_search(dev, parent_node_id);
        if (pn == NULL)
            return -rte_tm_error_set(error, EINVAL,
                RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID, NULL,
                rte_strerror(EINVAL));
    }

    /* priority: must be 0 .. (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) */
    if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PRIORITY, NULL, rte_strerror(EINVAL));

    /* weight: must be non-zero and fit in 8 bits */
    if (weight == 0 || weight >= UINT8_MAX)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_WEIGHT, NULL, rte_strerror(EINVAL));

    /* level_id: if valid, then
     *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
     *       -level_id must be zero
     *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
     *       -level_id must be parent level ID plus one
     */
    level = (pn == NULL) ? 0 : pn->level + 1;
    if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_LEVEL_ID, NULL, rte_strerror(EINVAL));

    /* params: must not be NULL */
    if (params == NULL)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_PARAMS, NULL, rte_strerror(EINVAL));

    /* params: per level checks */
    switch (level) {
    case TM_NODE_LEVEL_PORT:
        status = node_add_check_port(dev, node_id,
            parent_node_id, priority, weight, level_id,
            params, error);
        if (status)
            return status;
        break;

    case TM_NODE_LEVEL_SUBPORT:
        status = node_add_check_subport(dev, node_id,
            parent_node_id, priority, weight, level_id,
            params, error);
        if (status)
            return status;
        break;

    case TM_NODE_LEVEL_PIPE:
        status = node_add_check_pipe(dev, node_id,
            parent_node_id, priority, weight, level_id,
            params, error);
        if (status)
            return status;
        break;

    case TM_NODE_LEVEL_TC:
        status = node_add_check_tc(dev, node_id,
            parent_node_id, priority, weight, level_id,
            params, error);
        if (status)
            return status;
        break;

    case TM_NODE_LEVEL_QUEUE:
        status = node_add_check_queue(dev, node_id,
            parent_node_id, priority, weight, level_id,
            params, error);
        if (status)
            return status;
        break;

    default:
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_LEVEL_ID, NULL, rte_strerror(EINVAL));
    }

    return 0;
}

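/*
 * Editor's note: the node_id space doubles as the node type discriminator
 * throughout these checks: ids in [0, n_queues) are leaf (queue) nodes and
 * ids at or above n_queues are non-leaf nodes. The level itself is never
 * taken from node_id; it is always derived from the parent node.
 */
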
/* Traffic manager node add */
static int
pmd_tm_node_add(struct rte_eth_dev *dev,
    uint32_t node_id,
    uint32_t parent_node_id,
    uint32_t priority,
    uint32_t weight,
    uint32_t level_id,
    struct rte_tm_node_params *params,
    struct rte_tm_error *error)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_node_list *nl = &p->soft.tm.h.nodes;
    struct tm_node *n;
    uint32_t i;
    int status;

    /* Checks */
    if (p->soft.tm.hierarchy_frozen)
        return -rte_tm_error_set(error, EBUSY,
            RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL, rte_strerror(EBUSY));

    status = node_add_check(dev, node_id, parent_node_id, priority, weight,
        level_id, params, error);
    if (status)
        return status;

    /* Memory allocation */
    n = calloc(1, sizeof(struct tm_node));
    if (n == NULL)
        return -rte_tm_error_set(error, ENOMEM,
            RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL, rte_strerror(ENOMEM));

    /* Fill in */
    n->node_id = node_id;
    n->parent_node_id = parent_node_id;
    n->priority = priority;
    n->weight = weight;

    if (parent_node_id != RTE_TM_NODE_ID_NULL) {
        n->parent_node = tm_node_search(dev, parent_node_id);
        n->level = n->parent_node->level + 1;
    }

    if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
        n->shaper_profile = tm_shaper_profile_search(dev,
            params->shaper_profile_id);

    if (n->level == TM_NODE_LEVEL_QUEUE &&
        params->leaf.cman == RTE_TM_CMAN_WRED)
        n->wred_profile = tm_wred_profile_search(dev,
            params->leaf.wred.wred_profile_id);

    memcpy(&n->params, params, sizeof(n->params));

    /* Add to list */
    TAILQ_INSERT_TAIL(nl, n, node);
    p->soft.tm.h.n_nodes++;

    /* Update dependencies */
    if (n->parent_node)
        n->parent_node->n_children++;

    if (n->shaper_profile)
        n->shaper_profile->n_users++;

    for (i = 0; i < params->n_shared_shapers; i++) {
        struct tm_shared_shaper *ss;

        ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
        ss->n_users++;
    }

    if (n->wred_profile)
        n->wred_profile->n_users++;

    p->soft.tm.h.n_tm_nodes[n->level]++;

    return 0;
}

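/*
 * Illustrative hierarchy construction (application side; error handling and
 * most parameters elided; the ids are arbitrary as long as leaf ids stay
 * below n_queues):
 *
 *    uint32_t root = 1000000, subport = 1000001, pipe = 1000002;
 *
 *    rte_tm_node_add(port_id, root, RTE_TM_NODE_ID_NULL, 0, 1,
 *        RTE_TM_NODE_LEVEL_ID_ANY, &port_node_params, &err);
 *    rte_tm_node_add(port_id, subport, root, 0, 1,
 *        RTE_TM_NODE_LEVEL_ID_ANY, &subport_node_params, &err);
 *    rte_tm_node_add(port_id, pipe, subport, 0, 1,
 *        RTE_TM_NODE_LEVEL_ID_ANY, &pipe_node_params, &err);
 *
 * followed by one TC node per priority under the pipe and the queue nodes
 * (ids < n_queues) under each TC.
 */
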
/* Traffic manager node delete */
static int
pmd_tm_node_delete(struct rte_eth_dev *dev,
    uint32_t node_id,
    struct rte_tm_error *error)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_node *n;
    uint32_t i;

    /* Check hierarchy changes are currently allowed */
    if (p->soft.tm.hierarchy_frozen)
        return -rte_tm_error_set(error, EBUSY,
            RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL, rte_strerror(EBUSY));

    /* Check existing */
    n = tm_node_search(dev, node_id);
    if (n == NULL)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_NODE_ID, NULL, rte_strerror(EINVAL));

    /* Check unused */
    if (n->n_children)
        return -rte_tm_error_set(error, EBUSY,
            RTE_TM_ERROR_TYPE_NODE_ID, NULL, rte_strerror(EBUSY));

    /* Update dependencies */
    p->soft.tm.h.n_tm_nodes[n->level]--;

    if (n->wred_profile)
        n->wred_profile->n_users--;

    for (i = 0; i < n->params.n_shared_shapers; i++) {
        struct tm_shared_shaper *ss;

        ss = tm_shared_shaper_search(dev,
            n->params.shared_shaper_id[i]);
        ss->n_users--;
    }

    if (n->shaper_profile)
        n->shaper_profile->n_users--;

    if (n->parent_node)
        n->parent_node->n_children--;

    /* Remove from list */
    TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
    p->soft.tm.h.n_nodes--;
    free(n);

    return 0;
}

static void
pipe_profile_build(struct rte_eth_dev *dev,
    struct tm_node *np,
    struct rte_sched_pipe_params *pp)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_hierarchy *h = &p->soft.tm.h;
    struct tm_node_list *nl = &h->nodes;
    struct tm_node *nt, *nq;

    memset(pp, 0, sizeof(*pp));

    /* Pipe */
    pp->tb_rate = np->shaper_profile->params.peak.rate;
    pp->tb_size = np->shaper_profile->params.peak.size;

    /* Traffic Class (TC) */
    pp->tc_period = PIPE_TC_PERIOD;

#ifdef RTE_SCHED_SUBPORT_TC_OV
    pp->tc_ov_weight = np->weight;
#endif

    TAILQ_FOREACH(nt, nl, node) {
        uint32_t queue_id = 0;

        if (nt->level != TM_NODE_LEVEL_TC ||
            nt->parent_node_id != np->node_id)
            continue;

        pp->tc_rate[nt->priority] =
            nt->shaper_profile->params.peak.rate;

        /* Queue */
        TAILQ_FOREACH(nq, nl, node) {
            if (nq->level != TM_NODE_LEVEL_QUEUE ||
                nq->parent_node_id != nt->node_id)
                continue;

            if (nt->priority == RTE_SCHED_TRAFFIC_CLASS_BE)
                pp->wrr_weights[queue_id] = nq->weight;

            queue_id++;
        }
    }
}

static int
pipe_profile_free_exists(struct rte_eth_dev *dev,
    uint32_t *pipe_profile_id)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_params *t = &p->soft.tm.params;

    if (t->n_pipe_profiles < TM_MAX_PIPE_PROFILE) {
        *pipe_profile_id = t->n_pipe_profiles;
        return 1;
    }

    return 0;
}

static int
pipe_profile_exists(struct rte_eth_dev *dev,
    struct rte_sched_pipe_params *pp,
    uint32_t *pipe_profile_id)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_params *t = &p->soft.tm.params;
    uint32_t i;

    for (i = 0; i < t->n_pipe_profiles; i++)
        if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
            if (pipe_profile_id)
                *pipe_profile_id = i;
            return 1;
        }

    return 0;
}

static void
pipe_profile_install(struct rte_eth_dev *dev,
    struct rte_sched_pipe_params *pp,
    uint32_t pipe_profile_id)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_params *t = &p->soft.tm.params;

    memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
    t->n_pipe_profiles++;
}

static void
pipe_profile_mark(struct rte_eth_dev *dev,
    uint32_t subport_id,
    uint32_t pipe_id,
    uint32_t pipe_profile_id)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_hierarchy *h = &p->soft.tm.h;
    struct tm_params *t = &p->soft.tm.params;
    uint32_t n_pipes_per_subport, pos;

    n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
        h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
    pos = subport_id * n_pipes_per_subport + pipe_id;

    t->pipe_to_profile[pos] = pipe_profile_id;
}

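/*
 * Editor's note: pipe_to_profile[] is indexed by a flattened
 * (subport, pipe) coordinate; for example, with 256 pipes per subport,
 * subport 2 / pipe 10 lands at pos = 2 * 256 + 10 = 522. Be aware that the
 * mark/get helpers here use the run-time pipes-per-subport count as the
 * stride, whereas the lookup in softnic_tmgr_port_create() uses
 * TM_MAX_PIPES_PER_SUBPORT; the two only agree when those values match.
 */
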
static struct rte_sched_pipe_params *
pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_hierarchy *h = &p->soft.tm.h;
    struct tm_params *t = &p->soft.tm.params;
    uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
        h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

    uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
    uint32_t pipe_id = tm_node_pipe_id(dev, np);

    uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
    uint32_t pipe_profile_id = t->pipe_to_profile[pos];

    return &t->pipe_profiles[pipe_profile_id];
}

static int
pipe_profiles_generate(struct rte_eth_dev *dev)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_hierarchy *h = &p->soft.tm.h;
    struct tm_node_list *nl = &h->nodes;
    struct tm_node *ns, *np;
    uint32_t subport_id;

    /* Objective: Fill in the following fields in struct tm_params:
     *    - pipe_profiles
     *    - n_pipe_profiles
     *    - pipe_to_profile
     */
    subport_id = 0;
    TAILQ_FOREACH(ns, nl, node) {
        uint32_t pipe_id;

        if (ns->level != TM_NODE_LEVEL_SUBPORT)
            continue;

        pipe_id = 0;
        TAILQ_FOREACH(np, nl, node) {
            struct rte_sched_pipe_params pp;
            uint32_t pos;

            if (np->level != TM_NODE_LEVEL_PIPE ||
                np->parent_node_id != ns->node_id)
                continue;

            pipe_profile_build(dev, np, &pp);

            if (!pipe_profile_exists(dev, &pp, &pos)) {
                if (!pipe_profile_free_exists(dev, &pos))
                    return -1;

                pipe_profile_install(dev, &pp, pos);
            }

            pipe_profile_mark(dev, subport_id, pipe_id, pos);

            pipe_id++;
        }

        subport_id++;
    }

    return 0;
}

static struct tm_wred_profile *
tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_hierarchy *h = &p->soft.tm.h;
    struct tm_node_list *nl = &h->nodes;
    struct tm_node *nq;

    TAILQ_FOREACH(nq, nl, node) {
        if (nq->level != TM_NODE_LEVEL_QUEUE ||
            nq->parent_node->priority != tc_id)
            continue;

        return nq->wred_profile;
    }

    return NULL;
}

#ifdef RTE_SCHED_RED

static void
wred_profiles_set(struct rte_eth_dev *dev)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;

    uint32_t tc_id;
    enum rte_color color;

    for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
        for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
            struct rte_red_params *dst =
                &pp->red_params[tc_id][color];
            struct tm_wred_profile *src_wp =
                tm_tc_wred_profile_get(dev, tc_id);
            struct rte_tm_red_params *src =
                &src_wp->params.red_params[color];

            memcpy(dst, src, sizeof(*dst));
        }
}

#else

#define wred_profiles_set(dev)

#endif

static struct tm_shared_shaper *
tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
{
    return (tc_node->params.n_shared_shapers) ?
        tm_shared_shaper_search(dev,
            tc_node->params.shared_shaper_id[0]) :
        NULL;
}

static struct tm_shared_shaper *
tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
    struct tm_node *subport_node,
    uint32_t tc_id)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_node_list *nl = &p->soft.tm.h.nodes;
    struct tm_node *n;

    TAILQ_FOREACH(n, nl, node) {
        if (n->level != TM_NODE_LEVEL_TC ||
            n->parent_node->parent_node_id !=
                subport_node->node_id ||
            n->priority != tc_id)
            continue;

        return tm_tc_shared_shaper_get(dev, n);
    }

    return NULL;
}

static int
hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_hierarchy *h = &p->soft.tm.h;
    struct tm_node_list *nl = &h->nodes;
    struct tm_shared_shaper_list *ssl = &h->shared_shapers;
    struct tm_wred_profile_list *wpl = &h->wred_profiles;
    struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
    struct tm_shared_shaper *ss;

    uint32_t n_pipes_per_subport;

    /* Root node exists. */
    if (nr == NULL)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_LEVEL_ID, NULL, rte_strerror(EINVAL));

    /* There is at least one subport, max is not exceeded. */
    if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_LEVEL_ID, NULL, rte_strerror(EINVAL));

    /* There is at least one pipe. */
    if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_LEVEL_ID, NULL, rte_strerror(EINVAL));

    /* Number of pipes is the same for all subports. Maximum number of pipes
     * per subport is not exceeded.
     */
    n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
        h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

    if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL, rte_strerror(EINVAL));

    TAILQ_FOREACH(ns, nl, node) {
        if (ns->level != TM_NODE_LEVEL_SUBPORT)
            continue;

        if (ns->n_children != n_pipes_per_subport)
            return -rte_tm_error_set(error, EINVAL,
                RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
                rte_strerror(EINVAL));
    }

    /* Each pipe has exactly 13 TCs, with exactly one TC for each priority */
    TAILQ_FOREACH(np, nl, node) {
        uint32_t mask = 0, mask_expected =
            RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
                uint32_t);

        if (np->level != TM_NODE_LEVEL_PIPE)
            continue;

        if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
            return -rte_tm_error_set(error, EINVAL,
                RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
                rte_strerror(EINVAL));

        TAILQ_FOREACH(nt, nl, node) {
            if (nt->level != TM_NODE_LEVEL_TC ||
                nt->parent_node_id != np->node_id)
                continue;

            mask |= 1 << nt->priority;
        }

        if (mask != mask_expected)
            return -rte_tm_error_set(error, EINVAL,
                RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
                rte_strerror(EINVAL));
    }

    /* Each strict-priority TC has exactly 1 packet queue, while the lowest
     * priority TC (best-effort) has 4 queues.
     */
    TAILQ_FOREACH(nt, nl, node) {
        if (nt->level != TM_NODE_LEVEL_TC)
            continue;

        if (nt->n_children != 1 && nt->n_children != RTE_SCHED_BE_QUEUES_PER_PIPE)
            return -rte_tm_error_set(error, EINVAL,
                RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
                rte_strerror(EINVAL));
    }

    /**
     * Shared shapers:
     *    -For each TC #i, all pipes in the same subport use the same
     *     shared shaper (or no shared shaper) for their TC#i.
     *    -Each shared shaper needs to have at least one user. All its
     *     users have to be TC nodes with the same priority and the same
     *     subport.
     */
    TAILQ_FOREACH(ns, nl, node) {
        struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint32_t id;

        if (ns->level != TM_NODE_LEVEL_SUBPORT)
            continue;

        for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
            s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);

        TAILQ_FOREACH(nt, nl, node) {
            struct tm_shared_shaper *subport_ss, *tc_ss;

            if (nt->level != TM_NODE_LEVEL_TC ||
                nt->parent_node->parent_node_id !=
                    ns->node_id)
                continue;

            subport_ss = s[nt->priority];
            tc_ss = tm_tc_shared_shaper_get(dev, nt);

            if (subport_ss == NULL && tc_ss == NULL)
                continue;

            if ((subport_ss == NULL && tc_ss != NULL) ||
                (subport_ss != NULL && tc_ss == NULL) ||
                subport_ss->shared_shaper_id !=
                    tc_ss->shared_shaper_id)
                return -rte_tm_error_set(error, EINVAL,
                    RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
                    rte_strerror(EINVAL));
        }
    }

    TAILQ_FOREACH(ss, ssl, node) {
        struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
        uint32_t n_users = 0;

        if (nt_any != NULL)
            TAILQ_FOREACH(nt, nl, node) {
                if (nt->level != TM_NODE_LEVEL_TC ||
                    nt->priority != nt_any->priority ||
                    nt->parent_node->parent_node_id !=
                        nt_any->parent_node->parent_node_id)
                    continue;

                n_users++;
            }

        if (ss->n_users == 0 || ss->n_users != n_users)
            return -rte_tm_error_set(error, EINVAL,
                RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
                rte_strerror(EINVAL));
    }

    /* Not too many pipe profiles. */
    if (pipe_profiles_generate(dev))
        return -rte_tm_error_set(error, EINVAL,
            RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL, rte_strerror(EINVAL));

    /**
     * WRED (when used, i.e. at least one WRED profile defined):
     *    -Each WRED profile must have at least one user.
     *    -All leaf nodes must have their private WRED context enabled.
     *    -For each TC #i, all leaf nodes must use the same WRED profile
     *     for their private WRED context.
     */
    if (h->n_wred_profiles) {
        struct tm_wred_profile *wp;
        struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint32_t id;

        TAILQ_FOREACH(wp, wpl, node)
            if (wp->n_users == 0)
                return -rte_tm_error_set(error, EINVAL,
                    RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
                    rte_strerror(EINVAL));

        for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
            w[id] = tm_tc_wred_profile_get(dev, id);

            if (w[id] == NULL)
                return -rte_tm_error_set(error, EINVAL,
                    RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
                    rte_strerror(EINVAL));
        }

        TAILQ_FOREACH(nq, nl, node) {
            if (nq->level != TM_NODE_LEVEL_QUEUE)
                continue;

            id = nq->parent_node->priority;

            if (nq->wred_profile == NULL ||
                nq->wred_profile->wred_profile_id !=
                    w[id]->wred_profile_id)
                return -rte_tm_error_set(error, EINVAL,
                    RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
                    rte_strerror(EINVAL));
        }
    }

    return 0;
}

static void
hierarchy_blueprints_create(struct rte_eth_dev *dev)
{
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_params *t = &p->soft.tm.params;
    struct tm_hierarchy *h = &p->soft.tm.h;

    struct tm_node_list *nl = &h->nodes;
    struct tm_node *root = tm_root_node_present(dev), *n;

    uint32_t subport_id;

    t->port_params = (struct rte_sched_port_params) {
        .name = dev->data->name,
        .socket = dev->data->numa_node,
        .rate = root->shaper_profile->params.peak.rate,
        .mtu = dev->data->mtu,
        .frame_overhead =
            root->shaper_profile->params.pkt_length_adjust,
        .n_subports_per_port = root->n_children,
        .n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
            h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
        .qsize = {p->params.tm.qsize[0],
            p->params.tm.qsize[1],
            p->params.tm.qsize[2],
            p->params.tm.qsize[3],
            p->params.tm.qsize[4],
            p->params.tm.qsize[5],
            p->params.tm.qsize[6],
            p->params.tm.qsize[7],
            p->params.tm.qsize[8],
            p->params.tm.qsize[9],
            p->params.tm.qsize[10],
            p->params.tm.qsize[11],
            p->params.tm.qsize[12],
        },
        .pipe_profiles = t->pipe_profiles,
        .n_pipe_profiles = t->n_pipe_profiles,
        .n_max_pipe_profiles = TM_MAX_PIPE_PROFILE,
    };

    wred_profiles_set(dev);

    subport_id = 0;
    TAILQ_FOREACH(n, nl, node) {
        uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint32_t i;

        if (n->level != TM_NODE_LEVEL_SUBPORT)
            continue;

        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
            struct tm_shared_shaper *ss;
            struct tm_shaper_profile *sp;

            ss = tm_subport_tc_shared_shaper_get(dev, n, i);
            sp = (ss) ? tm_shaper_profile_search(dev,
                ss->shaper_profile_id) :
                n->shaper_profile;
            tc_rate[i] = sp->params.peak.rate;
        }

        t->subport_params[subport_id] =
            (struct rte_sched_subport_params) {
                .tb_rate = n->shaper_profile->params.peak.rate,
                .tb_size = n->shaper_profile->params.peak.size,

                .tc_rate = {tc_rate[0], tc_rate[1], tc_rate[2],
                    tc_rate[3], tc_rate[4], tc_rate[5],
                    tc_rate[6], tc_rate[7], tc_rate[8],
                    tc_rate[9], tc_rate[10], tc_rate[11],
                    tc_rate[12],
                },
                .tc_period = SUBPORT_TC_PERIOD,
        };

        subport_id++;
    }
}

/* Traffic manager hierarchy commit */
static int
pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
	int clear_on_fail,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	int status;

	/* Checks */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	status = hierarchy_commit_check(dev, error);
	if (status) {
		if (clear_on_fail)
			tm_hierarchy_free(p);

		return status;
	}

	/* Create blueprints */
	hierarchy_blueprints_create(dev);

	/* Freeze hierarchy */
	p->soft.tm.hierarchy_frozen = 1;

	return 0;
}
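/*
 * Usage sketch (illustrative only, not part of this driver): an application
 * reaches the commit callback above through the generic ethdev TM API, e.g.:
 *
 *	struct rte_tm_error error;
 *
 *	// ... rte_tm_shaper_profile_add() / rte_tm_node_add() calls that
 *	// build the port/subport/pipe/tc/queue hierarchy ...
 *
 *	// clear_on_fail = 1 maps to the tm_hierarchy_free() path above.
 *	if (rte_tm_hierarchy_commit(port_id, 1, &error))
 *		printf("commit failed: %s\n", error.message);
 */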
#ifdef RTE_SCHED_SUBPORT_TC_OV

static int
update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tc_ov_weight = (uint8_t)weight;

	/* Since implementation does not allow adding more pipe profiles after
	 * port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	np->weight = weight;

	return 0;
}

#endif
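/*
 * Note: update_pipe_weight() and the other update_* helpers below all rely
 * on the same technique: since rte_sched does not accept new pipe profiles
 * once the port is configured, an update is a read-modify-write of the
 * current profile followed by a pipe_profile_exists() lookup, and it only
 * succeeds when the modified profile is already in the set generated at
 * hierarchy commit time.
 */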
static int
update_queue_weight(struct rte_eth_dev *dev,
	struct tm_node *nq, uint32_t weight)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t queue_id = tm_node_queue_id(dev, nq);

	struct tm_node *nt = nq->parent_node;

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	uint32_t pipe_be_queue_id =
		queue_id - RTE_SCHED_TRAFFIC_CLASS_BE;

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.wrr_weights[pipe_be_queue_id] = (uint8_t)weight;

	/* Since implementation does not allow adding more pipe profiles after
	 * port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set
	 * of pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	nq->weight = weight;

	return 0;
}
/* Traffic manager node parent update */
static int
pmd_tm_node_parent_update(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	struct rte_tm_error *error)
{
	struct tm_node *n;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Parent node must be the same */
	if (n->parent_node_id != parent_node_id)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be the same */
	if (n->priority != priority)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight: must be non-zero and less than UINT8_MAX */
	if (weight == 0 || weight >= UINT8_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_SUBPORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_PIPE:
#ifdef RTE_SCHED_SUBPORT_TC_OV
		if (update_pipe_weight(dev, n, weight))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
#else
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
#endif
		/* fall-through */
	case TM_NODE_LEVEL_TC:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_QUEUE:
		/* fall-through */
	default:
		if (update_queue_weight(dev, n, weight))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	}
}
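/*
 * Note: in pmd_tm_node_parent_update() above, the parent and priority are
 * effectively read-only (the call fails unless they match the current
 * values), so the only attribute that can actually change is the weight,
 * and only at the pipe level (TC oversubscription weight, when
 * RTE_SCHED_SUBPORT_TC_OV is enabled) or at the queue level (WRR weight of
 * a best-effort queue).
 */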
static int
update_subport_rate(struct rte_eth_dev *dev,
	struct tm_node *ns,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_subport_params subport_params;

	/* Derive new subport configuration. */
	memcpy(&subport_params,
		&p->soft.tm.params.subport_params[subport_id],
		sizeof(subport_params));
	subport_params.tb_rate = sp->params.peak.rate;
	subport_params.tb_size = sp->params.peak.size;

	/* Update the subport configuration. */
	if (rte_sched_subport_config(SCHED(p), subport_id,
		&subport_params))
		return -1;

	/* Commit changes. */
	ns->shaper_profile->n_users--;
	ns->shaper_profile = sp;
	ns->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	memcpy(&p->soft.tm.params.subport_params[subport_id],
		&subport_params,
		sizeof(subport_params));

	return 0;
}
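/*
 * Note: the subport parameters are mirrored in software in
 * p->soft.tm.params.subport_params[]. The helper above reconfigures the
 * live rte_sched instance first and commits the shaper profile switch and
 * the mirrored copy only on success, so a failed rte_sched_subport_config()
 * leaves both views unchanged.
 */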
static int
update_pipe_rate(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tb_rate = sp->params.peak.rate;
	profile1.tb_size = sp->params.peak.size;

	/* Since implementation does not allow adding more pipe profiles after
	 * port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	np->shaper_profile->n_users--;
	np->shaper_profile = sp;
	np->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	return 0;
}
static int
update_tc_rate(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tc_rate[tc_id] = sp->params.peak.rate;

	/* Since implementation does not allow adding more pipe profiles after
	 * port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	nt->shaper_profile->n_users--;
	nt->shaper_profile = sp;
	nt->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	return 0;
}
/* Traffic manager node shaper update */
static int
pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct tm_node *n;
	struct tm_shaper_profile *sp;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper profile must be valid. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_SUBPORT:
		if (update_subport_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_PIPE:
		if (update_pipe_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_TC:
		if (update_tc_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_QUEUE:
		/* fall-through */
	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));
	}
}
static inline uint32_t
tm_port_queue_id(struct rte_eth_dev *dev,
	uint32_t port_subport_id,
	uint32_t subport_pipe_id,
	uint32_t pipe_tc_id,
	uint32_t tc_queue_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

	uint32_t port_pipe_id =
		port_subport_id * n_pipes_per_subport + subport_pipe_id;

	uint32_t port_queue_id =
		port_pipe_id * RTE_SCHED_QUEUES_PER_PIPE + pipe_tc_id +
		tc_queue_id;

	return port_queue_id;
}
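/*
 * Worked example for the mapping above (illustrative numbers only): with
 * 1024 pipes per subport, RTE_SCHED_QUEUES_PER_PIPE = 16 and the best-effort
 * class at index 12 in this rte_sched generation, best-effort queue #2
 * (pipe_tc_id = 12, tc_queue_id = 2) of pipe #7 in subport #1 yields:
 *
 *	port_pipe_id  = 1 * 1024 + 7       = 1031
 *	port_queue_id = 1031 * 16 + 12 + 2 = 16510
 */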
static int
read_port_stats(struct rte_eth_dev *dev,
	struct tm_node *nr,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
	uint32_t subport_id;

	for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
		struct rte_sched_subport_stats s;
		uint32_t tc_ov, id;

		/* Stats read */
		int status = rte_sched_subport_read_stats(SCHED(p),
			subport_id,
			&s,
			&tc_ov);
		if (status)
			return status;

		/* Stats accumulate */
		for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
			nr->stats.n_pkts +=
				s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
			nr->stats.n_bytes +=
				s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
			nr->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
				s.n_pkts_tc_dropped[id];
			nr->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
				s.n_bytes_tc_dropped[id];
		}
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &nr->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&nr->stats, 0, sizeof(nr->stats));

	return 0;
}
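/*
 * Note: read_port_stats() aggregates over all subports and traffic classes;
 * the remaining read_*_stats() helpers below share the same three-step
 * shape: accumulate the freshly read rte_sched counters into the node's
 * stats, copy them out when the caller provided stats/stats_mask, and zero
 * the accumulated stats when clear is non-zero.
 */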
static int
read_subport_stats(struct rte_eth_dev *dev,
	struct tm_node *ns,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	struct rte_sched_subport_stats s;
	uint32_t tc_ov, tc_id;

	/* Stats read */
	int status = rte_sched_subport_read_stats(SCHED(p),
		subport_id,
		&s,
		&tc_ov);
	if (status)
		return status;

	/* Stats accumulate */
	for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
		ns->stats.n_pkts +=
			s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
		ns->stats.n_bytes +=
			s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
		ns->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
			s.n_pkts_tc_dropped[tc_id];
		ns->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
			s.n_bytes_tc_dropped[tc_id];
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &ns->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&ns->stats, 0, sizeof(ns->stats));

	return 0;
}
static int
read_pipe_stats(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	uint32_t tc_id, queue_id;
	uint32_t i;

	/* Stats read */
	for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
		struct rte_sched_queue_stats s;
		uint16_t qlen;

		if (i < RTE_SCHED_TRAFFIC_CLASS_BE) {
			tc_id = i;
			queue_id = 0;
		} else {
			tc_id = RTE_SCHED_TRAFFIC_CLASS_BE;
			queue_id = i - tc_id;
		}

		uint32_t qid = tm_port_queue_id(dev,
			subport_id,
			pipe_id,
			tc_id,
			queue_id);

		int status = rte_sched_queue_read_stats(SCHED(p),
			qid,
			&s,
			&qlen);
		if (status)
			return status;

		/* Stats accumulate */
		np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
		np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
		np->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
			s.n_pkts_dropped;
		np->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
			s.n_bytes_dropped;
		np->stats.leaf.n_pkts_queued = qlen;
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &np->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&np->stats, 0, sizeof(np->stats));

	return 0;
}
static int
read_tc_stats(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	struct rte_sched_queue_stats s;
	uint32_t qid, i;
	uint16_t qlen;
	int status;

	/* Stats read */
	if (tc_id < RTE_SCHED_TRAFFIC_CLASS_BE) {
		qid = tm_port_queue_id(dev,
			subport_id,
			pipe_id,
			tc_id,
			0);

		status = rte_sched_queue_read_stats(SCHED(p),
			qid,
			&s,
			&qlen);
		if (status)
			return status;

		/* Stats accumulate */
		nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
		nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
		nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
			s.n_pkts_dropped;
		nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
			s.n_bytes_dropped;
		nt->stats.leaf.n_pkts_queued = qlen;
	} else {
		for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
			qid = tm_port_queue_id(dev,
				subport_id,
				pipe_id,
				tc_id,
				i);

			status = rte_sched_queue_read_stats(SCHED(p),
				qid,
				&s,
				&qlen);
			if (status)
				return status;

			/* Stats accumulate */
			nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
			nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
			nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
				s.n_pkts_dropped;
			nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
				s.n_bytes_dropped;
			nt->stats.leaf.n_pkts_queued = qlen;
		}
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &nt->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&nt->stats, 0, sizeof(nt->stats));

	return 0;
}
static int
read_queue_stats(struct rte_eth_dev *dev,
	struct tm_node *nq,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct rte_sched_queue_stats s;
	uint16_t qlen;

	uint32_t queue_id = tm_node_queue_id(dev, nq);

	struct tm_node *nt = nq->parent_node;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	/* Stats read */
	uint32_t qid = tm_port_queue_id(dev,
		subport_id,
		pipe_id,
		tc_id,
		queue_id);

	int status = rte_sched_queue_read_stats(SCHED(p),
		qid,
		&s,
		&qlen);
	if (status)
		return status;

	/* Stats accumulate */
	nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
	nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
	nq->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
	nq->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
		s.n_bytes_dropped;
	nq->stats.leaf.n_pkts_queued = qlen;

	/* Stats copy */
	if (stats)
		memcpy(stats, &nq->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_QUEUE;

	/* Stats clear */
	if (clear)
		memset(&nq->stats, 0, sizeof(nq->stats));

	return 0;
}
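/*
 * Note (assuming the STATS_MASK_* definitions earlier in this file): queue
 * nodes report STATS_MASK_QUEUE, which additionally advertises the
 * instantaneous queue length stored in n_pkts_queued above, while the
 * higher levels report STATS_MASK_DEFAULT.
 */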
/* Traffic manager read stats counters for specific node */
static int
pmd_tm_node_stats_read(struct rte_eth_dev *dev,
	uint32_t node_id,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear,
	struct rte_tm_error *error)
{
	struct tm_node *n;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		if (read_port_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_SUBPORT:
		if (read_subport_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_PIPE:
		if (read_pipe_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_TC:
		if (read_tc_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_QUEUE:
	default:
		if (read_queue_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	}
}
const struct rte_tm_ops pmd_tm_ops = {
	.node_type_get = pmd_tm_node_type_get,
	.capabilities_get = pmd_tm_capabilities_get,
	.level_capabilities_get = pmd_tm_level_capabilities_get,
	.node_capabilities_get = pmd_tm_node_capabilities_get,

	.wred_profile_add = pmd_tm_wred_profile_add,
	.wred_profile_delete = pmd_tm_wred_profile_delete,
	.shared_wred_context_add_update = NULL,
	.shared_wred_context_delete = NULL,

	.shaper_profile_add = pmd_tm_shaper_profile_add,
	.shaper_profile_delete = pmd_tm_shaper_profile_delete,
	.shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
	.shared_shaper_delete = pmd_tm_shared_shaper_delete,

	.node_add = pmd_tm_node_add,
	.node_delete = pmd_tm_node_delete,
	.node_suspend = NULL,
	.node_resume = NULL,
	.hierarchy_commit = pmd_tm_hierarchy_commit,

	.node_parent_update = pmd_tm_node_parent_update,
	.node_shaper_update = pmd_tm_node_shaper_update,
	.node_shared_shaper_update = NULL,
	.node_stats_update = NULL,
	.node_wfq_weight_mode_update = NULL,
	.node_cman_update = NULL,
	.node_wred_context_update = NULL,
	.node_shared_wred_context_update = NULL,

	.node_stats_read = pmd_tm_node_stats_read,
};
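/*
 * Usage note (sketch of the standard ethdev TM plumbing, not shown in this
 * file): the ethdev layer is expected to discover this table through the
 * driver's tm_ops_get callback, e.g. a pmd_tm_ops_get() that stores
 * &pmd_tm_ops into its argument; applications then invoke these callbacks
 * indirectly via the rte_tm_*() API.
 */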