/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <rte_malloc.h>
#include <rte_string_fns.h>

#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"

#define SUBPORT_TC_PERIOD	10
#define PIPE_TC_PERIOD		40
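
/*
 * This file implements the ethdev traffic management (rte_tm) ops of the
 * softnic PMD on top of the librte_sched hierarchical scheduler: a 5-level
 * hierarchy (port -> subport -> pipe -> traffic class -> queue) is built up
 * through the rte_tm API and then frozen into a rte_sched_port instance on
 * hierarchy commit.
 */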
int
softnic_tmgr_init(struct pmd_internals *p)
{
	TAILQ_INIT(&p->tmgr_port_list);

	return 0;
}

void
softnic_tmgr_free(struct pmd_internals *p)
{
	for ( ; ; ) {
		struct softnic_tmgr_port *tmgr_port;

		tmgr_port = TAILQ_FIRST(&p->tmgr_port_list);
		if (tmgr_port == NULL)
			break;

		TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
		rte_sched_port_free(tmgr_port->s);
		free(tmgr_port);
	}
}

struct softnic_tmgr_port *
softnic_tmgr_port_find(struct pmd_internals *p,
	const char *name)
{
	struct softnic_tmgr_port *tmgr_port;

	if (name == NULL)
		return NULL;

	TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node)
		if (strcmp(tmgr_port->name, name) == 0)
			return tmgr_port;

	return NULL;
}
struct softnic_tmgr_port *
softnic_tmgr_port_create(struct pmd_internals *p,
	const char *name)
{
	struct softnic_tmgr_port *tmgr_port;
	struct tm_params *t = &p->soft.tm.params;
	struct rte_sched_port *sched;
	uint32_t n_subports, subport_id;

	/* Check input params */
	if (name == NULL ||
		softnic_tmgr_port_find(p, name))
		return NULL;

	/* Is hierarchy frozen? */
	if (p->soft.tm.hierarchy_frozen == 0)
		return NULL;

	/* Port */
	sched = rte_sched_port_config(&t->port_params);
	if (sched == NULL)
		return NULL;

	/* Subport */
	n_subports = t->port_params.n_subports_per_port;
	for (subport_id = 0; subport_id < n_subports; subport_id++) {
		uint32_t n_pipes_per_subport =
			t->subport_params[subport_id].n_pipes_per_subport_enabled;
		uint32_t pipe_id;
		int status;

		status = rte_sched_subport_config(sched,
			subport_id,
			&t->subport_params[subport_id]);
		if (status) {
			rte_sched_port_free(sched);
			return NULL;
		}

		/* Pipe */
		for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
			int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id;
			int profile_id = t->pipe_to_profile[pos];

			if (profile_id < 0)
				continue;

			status = rte_sched_pipe_config(sched,
				subport_id,
				pipe_id,
				profile_id);
			if (status) {
				rte_sched_port_free(sched);
				return NULL;
			}
		}
	}

	/* Node allocation */
	tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port));
	if (tmgr_port == NULL) {
		rte_sched_port_free(sched);
		return NULL;
	}

	/* Node fill in */
	strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
	tmgr_port->s = sched;

	/* Node add to list */
	TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node);

	return tmgr_port;
}
static struct rte_sched_port *
SCHED(struct pmd_internals *p)
{
	struct softnic_tmgr_port *tmgr_port;

	tmgr_port = softnic_tmgr_port_find(p, "TMGR");
	if (tmgr_port == NULL)
		return NULL;

	return tmgr_port->s;
}
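
/* Note: the scheduler instance backing the TM hierarchy is the tmgr port
 * created under the fixed name "TMGR"; SCHED() is the shorthand used by the
 * run-time update paths below to reach it.
 */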
void
tm_hierarchy_init(struct pmd_internals *p)
{
	memset(&p->soft.tm, 0, sizeof(p->soft.tm));

	/* Initialize shaper profile list */
	TAILQ_INIT(&p->soft.tm.h.shaper_profiles);

	/* Initialize shared shaper list */
	TAILQ_INIT(&p->soft.tm.h.shared_shapers);

	/* Initialize wred profile list */
	TAILQ_INIT(&p->soft.tm.h.wred_profiles);

	/* Initialize TM node list */
	TAILQ_INIT(&p->soft.tm.h.nodes);
}

void
tm_hierarchy_free(struct pmd_internals *p)
{
	/* Remove all nodes */
	for ( ; ; ) {
		struct tm_node *tm_node;

		tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
		if (tm_node == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
		free(tm_node);
	}

	/* Remove all WRED profiles */
	for ( ; ; ) {
		struct tm_wred_profile *wred_profile;

		wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
		if (wred_profile == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
		free(wred_profile);
	}

	/* Remove all shared shapers */
	for ( ; ; ) {
		struct tm_shared_shaper *shared_shaper;

		shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
		if (shared_shaper == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
		free(shared_shaper);
	}

	/* Remove all shaper profiles */
	for ( ; ; ) {
		struct tm_shaper_profile *shaper_profile;

		shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
		if (shaper_profile == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
			shaper_profile, node);
		free(shaper_profile);
	}

	tm_hierarchy_init(p);
}
static struct tm_shaper_profile *
tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
	struct tm_shaper_profile *sp;

	TAILQ_FOREACH(sp, spl, node)
		if (shaper_profile_id == sp->shaper_profile_id)
			return sp;

	return NULL;
}

static struct tm_shared_shaper *
tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
	struct tm_shared_shaper *ss;

	TAILQ_FOREACH(ss, ssl, node)
		if (shared_shaper_id == ss->shared_shaper_id)
			return ss;

	return NULL;
}

static struct tm_wred_profile *
tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
	struct tm_wred_profile *wp;

	TAILQ_FOREACH(wp, wpl, node)
		if (wred_profile_id == wp->wred_profile_id)
			return wp;

	return NULL;
}

static struct tm_node *
tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node)
		if (n->node_id == node_id)
			return n;

	return NULL;
}

static struct tm_node *
tm_root_node_present(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node)
		if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
			return n;

	return NULL;
}

static uint32_t
tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *ns;
	uint32_t subport_id = 0;

	TAILQ_FOREACH(ns, nl, node) {
		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		if (ns->node_id == subport_node->node_id)
			return subport_id;

		subport_id++;
	}

	return UINT32_MAX;
}

static uint32_t
tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *np;
	uint32_t pipe_id = 0;

	TAILQ_FOREACH(np, nl, node) {
		if (np->level != TM_NODE_LEVEL_PIPE ||
			np->parent_node_id != pipe_node->parent_node_id)
			continue;

		if (np->node_id == pipe_node->node_id)
			return pipe_id;

		pipe_id++;
	}

	return UINT32_MAX;
}

static uint32_t
tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
{
	return tc_node->priority;
}

static uint32_t
tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *nq;
	uint32_t queue_id = 0;

	TAILQ_FOREACH(nq, nl, node) {
		if (nq->level != TM_NODE_LEVEL_QUEUE ||
			nq->parent_node_id != queue_node->parent_node_id)
			continue;

		if (nq->node_id == queue_node->node_id)
			return queue_id;

		queue_id++;
	}

	return UINT32_MAX;
}
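
/* The helpers above derive the rte_sched identifiers implicitly: a subport,
 * pipe or queue ID is the node's position among its siblings in list order,
 * while a TC ID is simply the node priority. UINT32_MAX signals "not found".
 */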
static uint32_t
tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t n_queues_max = p->params.tm.n_queues;
	uint32_t n_tc_max =
		(n_queues_max * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
		/ RTE_SCHED_QUEUES_PER_PIPE;
	uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
	uint32_t n_subports_max = n_pipes_max;
	uint32_t n_root_max = 1;

	switch (level) {
	case TM_NODE_LEVEL_PORT:
		return n_root_max;
	case TM_NODE_LEVEL_SUBPORT:
		return n_subports_max;
	case TM_NODE_LEVEL_PIPE:
		return n_pipes_max;
	case TM_NODE_LEVEL_TC:
		return n_tc_max;
	case TM_NODE_LEVEL_QUEUE:
	default:
		return n_queues_max;
	}
}
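
/* Illustration (assuming the librte_sched defaults of 16 queues per pipe and
 * 13 traffic classes per pipe): with n_queues_max = 65536, the maximums are
 * n_tc_max = 65536 * 13 / 16 = 53248 and n_pipes_max = 53248 / 13 = 4096,
 * with n_subports_max equal to n_pipes_max and a single root node.
 */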
/* Traffic manager node type get */
static int
pmd_tm_node_type_get(struct rte_eth_dev *dev,
	uint32_t node_id,
	int *is_leaf,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	if (is_leaf == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	if (node_id == RTE_TM_NODE_ID_NULL ||
		(tm_node_search(dev, node_id) == NULL))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	*is_leaf = node_id < p->params.tm.n_queues;

	return 0;
}
#ifdef RTE_SCHED_RED
#define WRED_SUPPORTED						1
#else
#define WRED_SUPPORTED						0
#endif

#define STATS_MASK_DEFAULT					\
	(RTE_TM_STATS_N_PKTS |					\
	RTE_TM_STATS_N_BYTES |					\
	RTE_TM_STATS_N_PKTS_GREEN_DROPPED |			\
	RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

#define STATS_MASK_QUEUE					\
	(STATS_MASK_DEFAULT |					\
	RTE_TM_STATS_N_PKTS_QUEUED)
static const struct rte_tm_capabilities tm_cap = {
	.n_nodes_max = UINT32_MAX,
	.n_levels_max = TM_NODE_LEVEL_MAX,

	.non_leaf_nodes_identical = 0,
	.leaf_nodes_identical = 1,

	.shaper_n_max = UINT32_MAX,
	.shaper_private_n_max = UINT32_MAX,
	.shaper_private_dual_rate_n_max = 0,
	.shaper_private_rate_min = 1,
	.shaper_private_rate_max = UINT32_MAX,
	.shaper_private_packet_mode_supported = 0,
	.shaper_private_byte_mode_supported = 1,

	.shaper_shared_n_max = UINT32_MAX,
	.shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
	.shaper_shared_n_shapers_per_node_max = 1,
	.shaper_shared_dual_rate_n_max = 0,
	.shaper_shared_rate_min = 1,
	.shaper_shared_rate_max = UINT32_MAX,
	.shaper_shared_packet_mode_supported = 0,
	.shaper_shared_byte_mode_supported = 1,

	.shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
	.shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,

	.sched_n_children_max = UINT32_MAX,
	.sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
	.sched_wfq_n_children_per_group_max = UINT32_MAX,
	.sched_wfq_n_groups_max = 1,
	.sched_wfq_weight_max = UINT32_MAX,
	.sched_wfq_packet_mode_supported = 0,
	.sched_wfq_byte_mode_supported = 1,

	.cman_wred_packet_mode_supported = WRED_SUPPORTED,
	.cman_wred_byte_mode_supported = 0,
	.cman_head_drop_supported = 0,
	.cman_wred_context_n_max = 0,
	.cman_wred_context_private_n_max = 0,
	.cman_wred_context_shared_n_max = 0,
	.cman_wred_context_shared_n_nodes_per_context_max = 0,
	.cman_wred_context_shared_n_contexts_per_node_max = 0,

	.mark_vlan_dei_supported = {0, 0, 0},
	.mark_ip_ecn_tcp_supported = {0, 0, 0},
	.mark_ip_ecn_sctp_supported = {0, 0, 0},
	.mark_ip_dscp_supported = {0, 0, 0},

	.dynamic_update_mask = 0,

	.stats_mask = STATS_MASK_QUEUE,
};
/* Traffic manager capabilities get */
static int
pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused,
	struct rte_tm_capabilities *cap,
	struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_cap, sizeof(*cap));

	cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

	cap->shaper_private_n_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);

	cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);

	cap->shaper_n_max = cap->shaper_private_n_max +
		cap->shaper_shared_n_max;

	cap->shaper_shared_n_nodes_per_shaper_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);

	cap->sched_n_children_max = RTE_MAX(
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
		(uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);

	cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;

	if (WRED_SUPPORTED)
		cap->cman_wred_context_private_n_max =
			tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

	cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
		cap->cman_wred_context_shared_n_max;

	return 0;
}
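
/* For the same illustrative 65536-queue configuration, n_nodes_max resolves
 * to 1 (port) + 4096 (subports) + 4096 (pipes) + 53248 (TCs) + 65536
 * (queues) = 126977 nodes.
 */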
static const struct rte_tm_level_capabilities tm_level_cap[] = {
	[TM_NODE_LEVEL_PORT] = {
		.n_nodes_max = 1,
		.n_nodes_nonleaf_max = 1,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_private_packet_mode_supported = 0,
			.shaper_private_byte_mode_supported = 1,
			.shaper_shared_n_max = 0,
			.shaper_shared_packet_mode_supported = 0,
			.shaper_shared_byte_mode_supported = 0,

			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = 1,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_SUBPORT] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_private_packet_mode_supported = 0,
			.shaper_private_byte_mode_supported = 1,
			.shaper_shared_n_max = 0,
			.shaper_shared_packet_mode_supported = 0,
			.shaper_shared_byte_mode_supported = 0,

			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
#ifdef RTE_SCHED_SUBPORT_TC_OV
			.sched_wfq_weight_max = UINT32_MAX,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,
#else
			.sched_wfq_weight_max = 1,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,
#endif

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_PIPE] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_private_packet_mode_supported = 0,
			.shaper_private_byte_mode_supported = 1,
			.shaper_shared_n_max = 0,
			.shaper_shared_packet_mode_supported = 0,
			.shaper_shared_byte_mode_supported = 0,

			.sched_n_children_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_sp_n_priorities_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_wfq_n_children_per_group_max = 1,
			.sched_wfq_n_groups_max = 0,
			.sched_wfq_weight_max = 1,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 0,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_TC] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_private_packet_mode_supported = 0,
			.shaper_private_byte_mode_supported = 1,
			.shaper_shared_n_max = 1,
			.shaper_shared_packet_mode_supported = 0,
			.shaper_shared_byte_mode_supported = 1,

			.sched_n_children_max =
				RTE_SCHED_BE_QUEUES_PER_PIPE,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max =
				RTE_SCHED_BE_QUEUES_PER_PIPE,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_QUEUE] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = 0,
		.n_nodes_leaf_max = UINT32_MAX,
		.non_leaf_nodes_identical = 0,
		.leaf_nodes_identical = 1,

		{.leaf = {
			.shaper_private_supported = 0,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 0,
			.shaper_private_rate_max = 0,
			.shaper_private_packet_mode_supported = 0,
			.shaper_private_byte_mode_supported = 0,
			.shaper_shared_n_max = 0,
			.shaper_shared_packet_mode_supported = 0,
			.shaper_shared_byte_mode_supported = 0,

			.cman_head_drop_supported = 0,
			.cman_wred_packet_mode_supported = WRED_SUPPORTED,
			.cman_wred_byte_mode_supported = 0,
			.cman_wred_context_private_supported = WRED_SUPPORTED,
			.cman_wred_context_shared_n_max = 0,

			.stats_mask = STATS_MASK_QUEUE,
		} },
	},
};
/* Traffic manager level capabilities get */
static int
pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
	uint32_t level_id,
	struct rte_tm_level_capabilities *cap,
	struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	if (level_id >= TM_NODE_LEVEL_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));

	switch (level_id) {
	case TM_NODE_LEVEL_PORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_SUBPORT);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_SUBPORT);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_PIPE);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_PIPE:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_PIPE);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		break;

	case TM_NODE_LEVEL_TC:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_TC);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		break;

	case TM_NODE_LEVEL_QUEUE:
	default:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_QUEUE);
		cap->n_nodes_leaf_max = cap->n_nodes_max;
		break;
	}

	return 0;
}
static const struct rte_tm_node_capabilities tm_node_cap[] = {
	[TM_NODE_LEVEL_PORT] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_private_packet_mode_supported = 0,
		.shaper_private_byte_mode_supported = 1,
		.shaper_shared_n_max = 0,
		.shaper_shared_packet_mode_supported = 0,
		.shaper_shared_byte_mode_supported = 0,

		{.nonleaf = {
			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = 1,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_SUBPORT] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_private_packet_mode_supported = 0,
		.shaper_private_byte_mode_supported = 1,
		.shaper_shared_n_max = 0,
		.shaper_shared_packet_mode_supported = 0,
		.shaper_shared_byte_mode_supported = 0,

		{.nonleaf = {
			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_PIPE] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_private_packet_mode_supported = 0,
		.shaper_private_byte_mode_supported = 1,
		.shaper_shared_n_max = 0,
		.shaper_shared_packet_mode_supported = 0,
		.shaper_shared_byte_mode_supported = 0,

		{.nonleaf = {
			.sched_n_children_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_sp_n_priorities_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_wfq_n_children_per_group_max = 1,
			.sched_wfq_n_groups_max = 0,
			.sched_wfq_weight_max = 1,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 0,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_TC] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_private_packet_mode_supported = 0,
		.shaper_private_byte_mode_supported = 1,
		.shaper_shared_n_max = 1,
		.shaper_shared_packet_mode_supported = 0,
		.shaper_shared_byte_mode_supported = 1,

		{.nonleaf = {
			.sched_n_children_max =
				RTE_SCHED_BE_QUEUES_PER_PIPE,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max =
				RTE_SCHED_BE_QUEUES_PER_PIPE,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_QUEUE] = {
		.shaper_private_supported = 0,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 0,
		.shaper_private_rate_max = 0,
		.shaper_private_packet_mode_supported = 0,
		.shaper_private_byte_mode_supported = 0,
		.shaper_shared_n_max = 0,
		.shaper_shared_packet_mode_supported = 0,
		.shaper_shared_byte_mode_supported = 0,

		{.leaf = {
			.cman_head_drop_supported = 0,
			.cman_wred_packet_mode_supported = WRED_SUPPORTED,
			.cman_wred_byte_mode_supported = 0,
			.cman_wred_context_private_supported = WRED_SUPPORTED,
			.cman_wred_context_shared_n_max = 0,
		} },

		.stats_mask = STATS_MASK_QUEUE,
	},
};
/* Traffic manager node capabilities get */
static int
pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
	uint32_t node_id,
	struct rte_tm_node_capabilities *cap,
	struct rte_tm_error *error)
{
	struct tm_node *tm_node;

	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	tm_node = tm_node_search(dev, node_id);
	if (tm_node == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));

	switch (tm_node->level) {
	case TM_NODE_LEVEL_PORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_SUBPORT);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_PIPE);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_PIPE:
	case TM_NODE_LEVEL_TC:
	case TM_NODE_LEVEL_QUEUE:
	default:
		break;
	}

	return 0;
}
static int
shaper_profile_check(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct tm_shaper_profile *sp;

	/* Shaper profile ID must not be NONE. */
	if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper profile must not exist. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak rate: non-zero, 32-bit */
	if (profile->peak.rate == 0 ||
		profile->peak.rate >= UINT32_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak size: non-zero, 32-bit */
	if (profile->peak.size == 0 ||
		profile->peak.size >= UINT32_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
			NULL,
			rte_strerror(EINVAL));

	/* Dual-rate profiles are not supported. */
	if (profile->committed.rate != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Packet length adjust: 24 bytes */
	if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
			NULL,
			rte_strerror(EINVAL));

	/* Packet mode is not supported. */
	if (profile->packet_mode != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
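
/* Example of a profile accepted by the checks above (illustrative values
 * only): a 10 Gbps single-rate shaper would use peak.rate = 1250000000
 * (bytes/sec), peak.size = 1000000 (bytes), committed.rate = 0,
 * pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS and packet_mode = 0.
 */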
/* Traffic manager shaper profile add */
static int
pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
	struct tm_shaper_profile *sp;
	int status;

	/* Check input params */
	status = shaper_profile_check(dev, shaper_profile_id, profile, error);
	if (status)
		return status;

	/* Memory allocation */
	sp = calloc(1, sizeof(struct tm_shaper_profile));
	if (sp == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	sp->shaper_profile_id = shaper_profile_id;
	memcpy(&sp->params, profile, sizeof(sp->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(spl, sp, node);
	p->soft.tm.h.n_shaper_profiles++;

	return 0;
}

/* Traffic manager shaper profile delete */
static int
pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile *sp;

	/* Check existing */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (sp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
	p->soft.tm.h.n_shaper_profiles--;
	free(sp);

	return 0;
}
static struct tm_node *
tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
	struct tm_shared_shaper *ss)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	/* Find a TC node that uses this shared shaper */
	TAILQ_FOREACH(n, nl, node) {
		if (n->level != TM_NODE_LEVEL_TC ||
			n->params.n_shared_shapers == 0 ||
			n->params.shared_shaper_id[0] != ss->shared_shaper_id)
			continue;

		return n;
	}

	return NULL;
}

static int
update_subport_tc_rate(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct tm_shared_shaper *ss,
	struct tm_shaper_profile *sp_new)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_subport_params subport_params;

	struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
		ss->shaper_profile_id);

	/* Derive new subport configuration. */
	memcpy(&subport_params,
		&p->soft.tm.params.subport_params[subport_id],
		sizeof(subport_params));
	subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;

	/* Update the subport configuration. */
	if (rte_sched_subport_config(SCHED(p),
		subport_id, &subport_params))
		return -1;

	/* Commit changes. */
	sp_old->n_users--;

	ss->shaper_profile_id = sp_new->shaper_profile_id;
	sp_new->n_users++;

	memcpy(&p->soft.tm.params.subport_params[subport_id],
		&subport_params,
		sizeof(subport_params));

	return 0;
}
/* Traffic manager shared shaper add/update */
static int
pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
	uint32_t shared_shaper_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper *ss;
	struct tm_shaper_profile *sp;
	struct tm_node *nt;

	/* Shaper profile must be valid. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/**
	 * Add new shared shaper
	 */
	ss = tm_shared_shaper_search(dev, shared_shaper_id);
	if (ss == NULL) {
		struct tm_shared_shaper_list *ssl =
			&p->soft.tm.h.shared_shapers;

		/* Hierarchy must not be frozen */
		if (p->soft.tm.hierarchy_frozen)
			return -rte_tm_error_set(error,
				EBUSY,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EBUSY));

		/* Memory allocation */
		ss = calloc(1, sizeof(struct tm_shared_shaper));
		if (ss == NULL)
			return -rte_tm_error_set(error,
				ENOMEM,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(ENOMEM));

		/* Fill in */
		ss->shared_shaper_id = shared_shaper_id;
		ss->shaper_profile_id = shaper_profile_id;

		/* Add to list */
		TAILQ_INSERT_TAIL(ssl, ss, node);
		p->soft.tm.h.n_shared_shapers++;

		return 0;
	}

	/**
	 * Update existing shared shaper
	 */
	/* Hierarchy must be frozen (run-time update) */
	if (p->soft.tm.hierarchy_frozen == 0)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Propagate change. */
	nt = tm_shared_shaper_get_tc(dev, ss);
	if (update_subport_tc_rate(dev, nt, ss, sp))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}

/* Traffic manager shared shaper delete */
static int
pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
	uint32_t shared_shaper_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper *ss;

	/* Check existing */
	ss = tm_shared_shaper_search(dev, shared_shaper_id);
	if (ss == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (ss->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
	p->soft.tm.h.n_shared_shapers--;
	free(ss);

	return 0;
}
static int
wred_profile_check(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct tm_wred_profile *wp;
	enum rte_color color;

	/* WRED profile ID must not be NONE. */
	if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* WRED profile must not exist. */
	wp = tm_wred_profile_search(dev, wred_profile_id);
	if (wp)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* WRED profile must be in packet mode */
	if (profile->packet_mode == 0)
		return -rte_tm_error_set(error,
			ENOTSUP,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(ENOTSUP));

	/* min_th <= max_th, max_th > 0 */
	for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
		uint32_t min_th = profile->red_params[color].min_th;
		uint32_t max_th = profile->red_params[color].max_th;

		if (min_th > max_th ||
			max_th == 0 ||
			min_th > UINT16_MAX ||
			max_th > UINT16_MAX)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_WRED_PROFILE,
				NULL,
				rte_strerror(EINVAL));
	}

	return 0;
}
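
/* Example of a profile accepted by the checks above (illustrative values
 * only): packet_mode = 1 with, per color, min_th = 32 and max_th = 64;
 * thresholds are expressed in packets and are capped at UINT16_MAX.
 */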
/* Traffic manager WRED profile add */
static int
pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
	struct tm_wred_profile *wp;
	int status;

	/* Check input params */
	status = wred_profile_check(dev, wred_profile_id, profile, error);
	if (status)
		return status;

	/* Memory allocation */
	wp = calloc(1, sizeof(struct tm_wred_profile));
	if (wp == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	wp->wred_profile_id = wred_profile_id;
	memcpy(&wp->params, profile, sizeof(wp->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(wpl, wp, node);
	p->soft.tm.h.n_wred_profiles++;

	return 0;
}

/* Traffic manager WRED profile delete */
static int
pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile *wp;

	/* Check existing */
	wp = tm_wred_profile_search(dev, wred_profile_id);
	if (wp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (wp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
	p->soft.tm.h.n_wred_profiles--;
	free(wp);

	return 0;
}
static int
node_add_check_port(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
		params->shaper_profile_id);

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_subport(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_pipe(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight __rte_unused,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE (13) */
	if (params->nonleaf.n_sp_priorities !=
		RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* WFQ mode must be byte mode */
	if (params->nonleaf.wfq_weight_mode != NULL &&
		params->nonleaf.wfq_weight_mode[0] != 0 &&
		params->nonleaf.wfq_weight_mode[1] != 0 &&
		params->nonleaf.wfq_weight_mode[2] != 0 &&
		params->nonleaf.wfq_weight_mode[3] != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_tc(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority __rte_unused,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Single valid shared shaper */
	if (params->n_shared_shapers > 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	if (params->n_shared_shapers == 1 &&
		(params->shared_shaper_id == NULL ||
		(!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Number of priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_queue(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight __rte_unused,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: leaf */
	if (node_id >= p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* No private shaper */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Congestion management must not be head drop */
	if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
			NULL,
			rte_strerror(EINVAL));

	/* Congestion management set to WRED */
	if (params->leaf.cman == RTE_TM_CMAN_WRED) {
		uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
		struct tm_wred_profile *wp = tm_wred_profile_search(dev,
			wred_profile_id);

		/* WRED profile (for private WRED context) must be valid */
		if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
			wp == NULL)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
				NULL,
				rte_strerror(EINVAL));

		/* No shared WRED contexts */
		if (params->leaf.wred.n_shared_wred_contexts != 0)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_QUEUE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct tm_node *pn;
	uint32_t level;
	int status;

	/* node_id, parent_node_id:
	 *    -node_id must not be RTE_TM_NODE_ID_NULL
	 *    -node_id must not be in use
	 *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
	 *        -root node must not exist
	 *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
	 *        -parent_node_id must be valid
	 */
	if (node_id == RTE_TM_NODE_ID_NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	if (tm_node_search(dev, node_id))
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EEXIST));

	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		pn = NULL;
		if (tm_root_node_present(dev))
			return -rte_tm_error_set(error,
				EEXIST,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EEXIST));
	} else {
		pn = tm_node_search(dev, parent_node_id);
		if (pn == NULL)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EINVAL));
	}

	/* priority: must be lower than the number of pipe traffic classes (0 .. 12) */
	if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* weight: must be non-zero and lower than UINT8_MAX (1 .. 254) */
	if (weight == 0 || weight >= UINT8_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* level_id: if valid, then
	 *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
	 *        -level_id must be zero
	 *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
	 *        -level_id must be parent level ID plus one
	 */
	level = (pn == NULL) ? 0 : pn->level + 1;
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* params: must not be NULL */
	if (params == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS,
			NULL,
			rte_strerror(EINVAL));

	/* params: per level checks */
	switch (level) {
	case TM_NODE_LEVEL_PORT:
		status = node_add_check_port(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		status = node_add_check_subport(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_PIPE:
		status = node_add_check_pipe(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_TC:
		status = node_add_check_tc(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_QUEUE:
		status = node_add_check_queue(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));
	}

	return 0;
}
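
/* The per-level checks above pin down the only hierarchy shape accepted by
 * this driver: one root (port), subports as its children, pipes under
 * subports, one TC per priority level under each pipe, and leaf queues
 * (node_id < n_queues) under the TCs.
 */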
/* Traffic manager node add */
static int
pmd_tm_node_add(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;
	uint32_t i;
	int status;

	/* Checks */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	status = node_add_check(dev, node_id, parent_node_id, priority, weight,
		level_id, params, error);
	if (status)
		return status;

	/* Memory allocation */
	n = calloc(1, sizeof(struct tm_node));
	if (n == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	n->node_id = node_id;
	n->parent_node_id = parent_node_id;
	n->priority = priority;
	n->weight = weight;

	if (parent_node_id != RTE_TM_NODE_ID_NULL) {
		n->parent_node = tm_node_search(dev, parent_node_id);
		n->level = n->parent_node->level + 1;
	}

	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
		n->shaper_profile = tm_shaper_profile_search(dev,
			params->shaper_profile_id);

	if (n->level == TM_NODE_LEVEL_QUEUE &&
		params->leaf.cman == RTE_TM_CMAN_WRED)
		n->wred_profile = tm_wred_profile_search(dev,
			params->leaf.wred.wred_profile_id);

	memcpy(&n->params, params, sizeof(n->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(nl, n, node);
	p->soft.tm.h.n_nodes++;

	/* Update dependencies */
	if (n->parent_node)
		n->parent_node->n_children++;

	if (n->shaper_profile)
		n->shaper_profile->n_users++;

	for (i = 0; i < params->n_shared_shapers; i++) {
		struct tm_shared_shaper *ss;

		ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
		ss->n_users++;
	}

	if (n->wred_profile)
		n->wred_profile->n_users++;

	p->soft.tm.h.n_tm_nodes[n->level]++;

	return 0;
}
/* Traffic manager node delete */
static int
pmd_tm_node_delete(struct rte_eth_dev *dev,
	uint32_t node_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node *n;
	uint32_t i;

	/* Check hierarchy changes are currently allowed */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Check existing */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (n->n_children)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Update dependencies */
	p->soft.tm.h.n_tm_nodes[n->level]--;

	if (n->wred_profile)
		n->wred_profile->n_users--;

	for (i = 0; i < n->params.n_shared_shapers; i++) {
		struct tm_shared_shaper *ss;

		ss = tm_shared_shaper_search(dev,
			n->params.shared_shaper_id[i]);
		ss->n_users--;
	}

	if (n->shaper_profile)
		n->shaper_profile->n_users--;

	if (n->parent_node)
		n->parent_node->n_children--;

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
	p->soft.tm.h.n_nodes--;
	free(n);

	return 0;
}
static void
pipe_profile_build(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct rte_sched_pipe_params *pp)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_node *nt, *nq;

	memset(pp, 0, sizeof(*pp));

	/* Pipe */
	pp->tb_rate = np->shaper_profile->params.peak.rate;
	pp->tb_size = np->shaper_profile->params.peak.size;

	/* Traffic Class (TC) */
	pp->tc_period = PIPE_TC_PERIOD;
#ifdef RTE_SCHED_SUBPORT_TC_OV
	pp->tc_ov_weight = np->weight;
#endif

	TAILQ_FOREACH(nt, nl, node) {
		uint32_t queue_id = 0;

		if (nt->level != TM_NODE_LEVEL_TC ||
			nt->parent_node_id != np->node_id)
			continue;

		pp->tc_rate[nt->priority] =
			nt->shaper_profile->params.peak.rate;

		/* Queue */
		TAILQ_FOREACH(nq, nl, node) {

			if (nq->level != TM_NODE_LEVEL_QUEUE ||
				nq->parent_node_id != nt->node_id)
				continue;

			if (nt->priority == RTE_SCHED_TRAFFIC_CLASS_BE)
				pp->wrr_weights[queue_id] = nq->weight;

			queue_id++;
		}
	}
}
static int
pipe_profile_free_exists(struct rte_eth_dev *dev,
	uint32_t *pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;

	if (t->n_pipe_profiles < TM_MAX_PIPE_PROFILE) {
		*pipe_profile_id = t->n_pipe_profiles;
		return 1;
	}

	return 0;
}

static int
pipe_profile_exists(struct rte_eth_dev *dev,
	struct rte_sched_pipe_params *pp,
	uint32_t *pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;
	uint32_t i;

	for (i = 0; i < t->n_pipe_profiles; i++)
		if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
			if (pipe_profile_id)
				*pipe_profile_id = i;
			return 1;
		}

	return 0;
}

static void
pipe_profile_install(struct rte_eth_dev *dev,
	struct rte_sched_pipe_params *pp,
	uint32_t pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;

	memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
	t->n_pipe_profiles++;
}

static void
pipe_profile_mark(struct rte_eth_dev *dev,
	uint32_t subport_id,
	uint32_t pipe_id,
	uint32_t pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_params *t = &p->soft.tm.params;
	uint32_t n_pipes_per_subport, pos;

	n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
	pos = subport_id * n_pipes_per_subport + pipe_id;

	t->pipe_to_profile[pos] = pipe_profile_id;
}
static struct rte_sched_pipe_params *
pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_params *t = &p->soft.tm.params;
	uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

	uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
	uint32_t pipe_profile_id = t->pipe_to_profile[pos];

	return &t->pipe_profiles[pipe_profile_id];
}

static int
pipe_profiles_generate(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_node *ns, *np;
	uint32_t subport_id;

	/* Objective: Fill in the following fields in struct tm_params:
	 *    - pipe_profiles
	 *    - n_pipe_profiles
	 *    - pipe_to_profile
	 */
	subport_id = 0;
	TAILQ_FOREACH(ns, nl, node) {
		uint32_t pipe_id;

		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		pipe_id = 0;
		TAILQ_FOREACH(np, nl, node) {
			struct rte_sched_pipe_params pp;
			uint32_t pos;

			if (np->level != TM_NODE_LEVEL_PIPE ||
				np->parent_node_id != ns->node_id)
				continue;

			pipe_profile_build(dev, np, &pp);

			if (!pipe_profile_exists(dev, &pp, &pos)) {
				if (!pipe_profile_free_exists(dev, &pos))
					return -1;

				pipe_profile_install(dev, &pp, pos);
			}

			pipe_profile_mark(dev, subport_id, pipe_id, pos);

			pipe_id++;
		}

		subport_id++;
	}

	return 0;
}
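
/* Pipe profiles are deduplicated: each pipe node is compiled into a
 * rte_sched_pipe_params blueprint, matched byte-for-byte (memcmp) against
 * the profiles already installed, and only added when no identical profile
 * exists and the TM_MAX_PIPE_PROFILE table still has a free slot.
 */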
static struct tm_wred_profile *
tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_node *nq;

	TAILQ_FOREACH(nq, nl, node) {
		if (nq->level != TM_NODE_LEVEL_QUEUE ||
			nq->parent_node->priority != tc_id)
			continue;

		return nq->wred_profile;
	}

	return NULL;
}

#ifdef RTE_SCHED_RED

static void
wred_profiles_set(struct rte_eth_dev *dev, uint32_t subport_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct rte_sched_subport_params *pp =
		&p->soft.tm.params.subport_params[subport_id];

	uint32_t tc_id;
	enum rte_color color;

	for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
		for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
			struct rte_red_params *dst =
				&pp->red_params[tc_id][color];
			struct tm_wred_profile *src_wp =
				tm_tc_wred_profile_get(dev, tc_id);
			struct rte_tm_red_params *src =
				&src_wp->params.red_params[color];

			memcpy(dst, src, sizeof(*dst));
		}
}

#else

#define wred_profiles_set(dev, subport_id)

#endif
static struct tm_shared_shaper *
tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
{
	return (tc_node->params.n_shared_shapers) ?
		tm_shared_shaper_search(dev,
			tc_node->params.shared_shaper_id[0]) :
		NULL;
}

static struct tm_shared_shaper *
tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
	struct tm_node *subport_node,
	uint32_t tc_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node) {
		if (n->level != TM_NODE_LEVEL_TC ||
			n->parent_node->parent_node_id !=
				subport_node->node_id ||
			n->priority != tc_id)
			continue;

		return tm_tc_shared_shaper_get(dev, n);
	}

	return NULL;
}
2347 hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
2349 struct pmd_internals *p = dev->data->dev_private;
2350 struct tm_hierarchy *h = &p->soft.tm.h;
2351 struct tm_node_list *nl = &h->nodes;
2352 struct tm_shared_shaper_list *ssl = &h->shared_shapers;
2353 struct tm_wred_profile_list *wpl = &h->wred_profiles;
2354 struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
2355 struct tm_shared_shaper *ss;
2357 uint32_t n_pipes_per_subport;
2359 /* Root node exists. */
2361 return -rte_tm_error_set(error,
2363 RTE_TM_ERROR_TYPE_LEVEL_ID,
2365 rte_strerror(EINVAL));
2367 /* There is at least one subport, max is not exceeded. */
2368 if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
2369 return -rte_tm_error_set(error,
2371 RTE_TM_ERROR_TYPE_LEVEL_ID,
2373 rte_strerror(EINVAL));
2375 /* There is at least one pipe. */
2376 if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
2377 return -rte_tm_error_set(error,
2379 RTE_TM_ERROR_TYPE_LEVEL_ID,
2381 rte_strerror(EINVAL));
2383 /* Number of pipes is the same for all subports. Maximum number of pipes
2384 * per subport is not exceeded.
2386 n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2387 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2389 if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
2390 return -rte_tm_error_set(error,
2392 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2394 rte_strerror(EINVAL));
2396 TAILQ_FOREACH(ns, nl, node) {
2397 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2400 if (ns->n_children != n_pipes_per_subport)
2401 return -rte_tm_error_set(error,
2403 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2405 rte_strerror(EINVAL));
2408 /* Each pipe has exactly 13 TCs, with exactly one TC for each priority */
2409 TAILQ_FOREACH(np, nl, node) {
2410 uint32_t mask = 0, mask_expected =
2411 RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
2414 if (np->level != TM_NODE_LEVEL_PIPE)
2417 if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
2418 return -rte_tm_error_set(error,
2420 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2422 rte_strerror(EINVAL));
2424 TAILQ_FOREACH(nt, nl, node) {
2425 if (nt->level != TM_NODE_LEVEL_TC ||
2426 nt->parent_node_id != np->node_id)
2429 mask |= 1 << nt->priority;
2432 if (mask != mask_expected)
2433 return -rte_tm_error_set(error,
2435 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2437 rte_strerror(EINVAL));
2440 /** Each Strict priority TC has exactly 1 packet queues while
2441 * lowest priority TC (Best-effort) has 4 queues.
2443 TAILQ_FOREACH(nt, nl, node) {
2444 if (nt->level != TM_NODE_LEVEL_TC)
2447 if (nt->n_children != 1 && nt->n_children != RTE_SCHED_BE_QUEUES_PER_PIPE)
2448 return -rte_tm_error_set(error,
2450 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2452 rte_strerror(EINVAL));
2457 * -For each TC #i, all pipes in the same subport use the same
2458 * shared shaper (or no shared shaper) for their TC#i.
2459 * -Each shared shaper needs to have at least one user. All its
2460 * users have to be TC nodes with the same priority and the same
2463 TAILQ_FOREACH(ns, nl, node) {
2464 struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2467 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2470 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
2471 s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);
2473 TAILQ_FOREACH(nt, nl, node) {
2474 struct tm_shared_shaper *subport_ss, *tc_ss;
2476 if (nt->level != TM_NODE_LEVEL_TC ||
2477 nt->parent_node->parent_node_id !=
2481 subport_ss = s[nt->priority];
2482 tc_ss = tm_tc_shared_shaper_get(dev, nt);
2484 if (subport_ss == NULL && tc_ss == NULL)
2487 if ((subport_ss == NULL && tc_ss != NULL) ||
2488 (subport_ss != NULL && tc_ss == NULL) ||
2489 subport_ss->shared_shaper_id !=
2490 tc_ss->shared_shaper_id)
2491 return -rte_tm_error_set(error,
2493 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2495 rte_strerror(EINVAL));
2499 TAILQ_FOREACH(ss, ssl, node) {
2500 struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
2501 uint32_t n_users = 0;
2504 TAILQ_FOREACH(nt, nl, node) {
2505 if (nt->level != TM_NODE_LEVEL_TC ||
2506 nt->priority != nt_any->priority ||
2507 nt->parent_node->parent_node_id !=
2508 nt_any->parent_node->parent_node_id)
2514 if (ss->n_users == 0 || ss->n_users != n_users)
2515 return -rte_tm_error_set(error,
2517 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2519 rte_strerror(EINVAL));
2522 /* Not too many pipe profiles. */
2523 if (pipe_profiles_generate(dev))
2524 return -rte_tm_error_set(error,
2526 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2528 rte_strerror(EINVAL));

	/* WRED (when used, i.e. at least one WRED profile defined):
	 * -Each WRED profile must have at least one user.
	 * -All leaf nodes must have their private WRED context enabled.
	 * -For each TC #i, all leaf nodes must use the same WRED profile
	 *  for their private WRED context.
	 */
	if (h->n_wred_profiles) {
		struct tm_wred_profile *wp;
		struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
		uint32_t id;

		TAILQ_FOREACH(wp, wpl, node)
			if (wp->n_users == 0)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));

		for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
			w[id] = tm_tc_wred_profile_get(dev, id);

			if (w[id] == NULL)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));
		}

		TAILQ_FOREACH(nq, nl, node) {
			uint32_t id;

			if (nq->level != TM_NODE_LEVEL_QUEUE)
				continue;

			id = nq->parent_node->priority;

			if (nq->wred_profile == NULL ||
				nq->wred_profile->wred_profile_id !=
					w[id]->wred_profile_id)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));
		}
	}

	return 0;
}
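
/* Map the frozen TM hierarchy onto the librte_sched configuration: build
 * one rte_sched_port_params blueprint from the root node and one
 * rte_sched_subport_params blueprint per subport node. These blueprints
 * are later consumed by softnic_tmgr_port_create().
 */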
static void
hierarchy_blueprints_create(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;
	struct tm_hierarchy *h = &p->soft.tm.h;

	struct tm_node_list *nl = &h->nodes;
	struct tm_node *root = tm_root_node_present(dev), *n;

	uint32_t subport_id;

	t->port_params = (struct rte_sched_port_params) {
		.name = dev->data->name,
		.socket = dev->data->numa_node,
		.rate = root->shaper_profile->params.peak.rate,
		.mtu = dev->data->mtu,
		.frame_overhead =
			root->shaper_profile->params.pkt_length_adjust,
		.n_subports_per_port = root->n_children,
		.n_pipes_per_subport = TM_MAX_PIPES_PER_SUBPORT,
	};

	subport_id = 0;
	TAILQ_FOREACH(n, nl, node) {
		uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
		uint32_t i;

		if (n->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
			struct tm_shared_shaper *ss;
			struct tm_shaper_profile *sp;

			ss = tm_subport_tc_shared_shaper_get(dev, n, i);
			sp = (ss) ? tm_shaper_profile_search(dev,
				ss->shaper_profile_id) :
				n->shaper_profile;
			tc_rate[i] = sp->params.peak.rate;
		}

		t->subport_params[subport_id] =
			(struct rte_sched_subport_params) {
				.tb_rate = n->shaper_profile->params.peak.rate,
				.tb_size = n->shaper_profile->params.peak.size,

				.tc_rate = {tc_rate[0],
					tc_rate[1],
					tc_rate[2],
					tc_rate[3],
					tc_rate[4],
					tc_rate[5],
					tc_rate[6],
					tc_rate[7],
					tc_rate[8],
					tc_rate[9],
					tc_rate[10],
					tc_rate[11],
					tc_rate[12],
				},
				.tc_period = SUBPORT_TC_PERIOD,
				.n_pipes_per_subport_enabled =
					h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
					h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
				.qsize = {p->params.tm.qsize[0],
					p->params.tm.qsize[1],
					p->params.tm.qsize[2],
					p->params.tm.qsize[3],
					p->params.tm.qsize[4],
					p->params.tm.qsize[5],
					p->params.tm.qsize[6],
					p->params.tm.qsize[7],
					p->params.tm.qsize[8],
					p->params.tm.qsize[9],
					p->params.tm.qsize[10],
					p->params.tm.qsize[11],
					p->params.tm.qsize[12],
				},
				.pipe_profiles = t->pipe_profiles,
				.n_pipe_profiles = t->n_pipe_profiles,
				.n_max_pipe_profiles = TM_MAX_PIPE_PROFILE,
		};

		wred_profiles_set(dev, subport_id);
		subport_id++;
	}
}
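
/* Typical caller flow (a sketch, using the generic rte_tm API rather than
 * these internal symbols): the application adds shaper profiles and nodes
 * level by level with rte_tm_shaper_profile_add() and rte_tm_node_add(),
 * then freezes the hierarchy with rte_tm_hierarchy_commit(port_id, 1,
 * &error), which lands here through pmd_tm_ops.hierarchy_commit.
 */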
/* Traffic manager hierarchy commit */
static int
pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
	int clear_on_fail,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	int status;

	/* Checks */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	status = hierarchy_commit_check(dev, error);
	if (status) {
		if (clear_on_fail)
			tm_hierarchy_free(p);

		return status;
	}

	/* Create blueprints */
	hierarchy_blueprints_create(dev);

	/* Freeze hierarchy */
	p->soft.tm.hierarchy_frozen = 1;

	return 0;
}

#ifdef RTE_SCHED_SUBPORT_TC_OV
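
/* Update the TC oversubscription weight of a pipe node by switching the
 * pipe to an existing pipe profile that differs from the current one only
 * in tc_ov_weight; new profiles cannot be created once the port is
 * configured.
 */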
static int
update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tc_ov_weight = (uint8_t)weight;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be updated
	 * only if the new profile is already part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	np->weight = weight;

	return 0;
}

#endif
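
/* Update the WRR weight of a best-effort queue node; like the pipe weight
 * update above, this only succeeds if a matching pipe profile already
 * exists.
 */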
static int
update_queue_weight(struct rte_eth_dev *dev,
	struct tm_node *nq, uint32_t weight)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t queue_id = tm_node_queue_id(dev, nq);

	struct tm_node *nt = nq->parent_node;

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	uint32_t pipe_be_queue_id =
		queue_id - RTE_SCHED_TRAFFIC_CLASS_BE;

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.wrr_weights[pipe_be_queue_id] = (uint8_t)weight;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be updated
	 * only if the new profile is already part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	nq->weight = weight;

	return 0;
}

/* Traffic manager node parent update */
static int
pmd_tm_node_parent_update(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	struct rte_tm_error *error)
{
	struct tm_node *n;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Parent node must be the same */
	if (n->parent_node_id != parent_node_id)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be the same */
	if (n->priority != priority)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 .. 255 */
	if (weight == 0 || weight >= UINT8_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_SUBPORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_PIPE:
#ifdef RTE_SCHED_SUBPORT_TC_OV
		if (update_pipe_weight(dev, n, weight))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
#else
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
#endif
		/* fall-through */
	case TM_NODE_LEVEL_TC:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_QUEUE:
		/* fall-through */
	default:
		if (update_queue_weight(dev, n, weight))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	}
}
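
/* Reconfigure a subport with the token bucket rate/size taken from the
 * given shaper profile, then move the node onto that profile.
 */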
static int
update_subport_rate(struct rte_eth_dev *dev,
	struct tm_node *ns,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_subport_params subport_params;

	/* Derive new subport configuration. */
	memcpy(&subport_params,
		&p->soft.tm.params.subport_params[subport_id],
		sizeof(subport_params));
	subport_params.tb_rate = sp->params.peak.rate;
	subport_params.tb_size = sp->params.peak.size;

	/* Update the subport configuration. */
	if (rte_sched_subport_config(SCHED(p), subport_id,
		&subport_params))
		return -1;

	/* Commit changes. */
	ns->shaper_profile->n_users--;

	ns->shaper_profile = sp;
	ns->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	memcpy(&p->soft.tm.params.subport_params[subport_id],
		&subport_params,
		sizeof(subport_params));

	return 0;
}
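
/* Update the token bucket rate/size of a pipe node by switching to an
 * existing pipe profile that matches the new shaper parameters.
 */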
static int
update_pipe_rate(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tb_rate = sp->params.peak.rate;
	profile1.tb_size = sp->params.peak.size;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be updated
	 * only if the new profile is already part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	np->shaper_profile->n_users--;
	np->shaper_profile = sp;
	np->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	return 0;
}
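
/* Update the rate of a single traffic class within a pipe, again by
 * switching to an existing pipe profile with the desired tc_rate.
 */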
static int
update_tc_rate(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tc_rate[tc_id] = sp->params.peak.rate;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be updated
	 * only if the new profile is already part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	nt->shaper_profile->n_users--;
	nt->shaper_profile = sp;
	nt->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	return 0;
}

/* Traffic manager node shaper update */
static int
pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct tm_node *n;
	struct tm_shaper_profile *sp;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper profile must be valid. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));
		/* fall-through */
	case TM_NODE_LEVEL_SUBPORT:
		if (update_subport_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_PIPE:
		if (update_pipe_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_TC:
		if (update_tc_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_QUEUE:
		/* fall-through */
	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));
	}
}
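
/* Convert (subport, pipe, TC, queue) coordinates into a flat port-level
 * queue index: queues are laid out pipe by pipe, RTE_SCHED_QUEUES_PER_PIPE
 * (16) per pipe, with the 12 strict priority TCs at offsets 0..11 and the
 * 4 best-effort queues at offsets 12..15. For example, with 2 pipes per
 * subport, subport 1 / pipe 0 / TC 12 (best-effort) / queue 3 maps to
 * (1 * 2 + 0) * 16 + 12 + 3 = 47.
 */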
static inline uint32_t
tm_port_queue_id(struct rte_eth_dev *dev,
	uint32_t port_subport_id,
	uint32_t subport_pipe_id,
	uint32_t pipe_tc_id,
	uint32_t tc_queue_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

	uint32_t port_pipe_id =
		port_subport_id * n_pipes_per_subport + subport_pipe_id;

	uint32_t port_queue_id =
		port_pipe_id * RTE_SCHED_QUEUES_PER_PIPE + pipe_tc_id + tc_queue_id;

	return port_queue_id;
}
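
/* Aggregate subport-level counters from librte_sched into the root node
 * stats: packet/byte totals plus green drop counters for every TC of
 * every subport.
 */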
static int
read_port_stats(struct rte_eth_dev *dev,
	struct tm_node *nr,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
	uint32_t subport_id;

	for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
		struct rte_sched_subport_stats s;
		uint32_t tc_ov, id;

		/* Stats read */
		int status = rte_sched_subport_read_stats(SCHED(p),
			subport_id,
			&s,
			&tc_ov);
		if (status)
			return status;

		/* Stats accumulate */
		for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
			nr->stats.n_pkts +=
				s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
			nr->stats.n_bytes +=
				s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
			nr->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
				s.n_pkts_tc_dropped[id];
			nr->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
				s.n_bytes_tc_dropped[id];
		}
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &nr->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&nr->stats, 0, sizeof(nr->stats));

	return 0;
}
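
/* Same as read_port_stats(), but for a single subport node. */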
static int
read_subport_stats(struct rte_eth_dev *dev,
	struct tm_node *ns,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	struct rte_sched_subport_stats s;
	uint32_t tc_ov, tc_id;

	/* Stats read */
	int status = rte_sched_subport_read_stats(SCHED(p),
		subport_id,
		&s,
		&tc_ov);
	if (status)
		return status;

	/* Stats accumulate */
	for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
		ns->stats.n_pkts +=
			s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
		ns->stats.n_bytes +=
			s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
		ns->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
			s.n_pkts_tc_dropped[tc_id];
		ns->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
			s.n_bytes_tc_dropped[tc_id];
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &ns->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&ns->stats, 0, sizeof(ns->stats));

	return 0;
}
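
/* Accumulate the stats of all 16 queues of a pipe into the pipe node. */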
static int
read_pipe_stats(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;

	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	uint32_t tc_id, queue_id;
	uint32_t i;

	/* Stats read */
	for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
		struct rte_sched_queue_stats s;
		uint16_t qlen;

		if (i < RTE_SCHED_TRAFFIC_CLASS_BE) {
			tc_id = i;
			queue_id = 0;
		} else {
			tc_id = RTE_SCHED_TRAFFIC_CLASS_BE;
			queue_id = i - tc_id;
		}

		uint32_t qid = tm_port_queue_id(dev,
			subport_id,
			pipe_id,
			tc_id,
			queue_id);

		int status = rte_sched_queue_read_stats(SCHED(p),
			qid,
			&s,
			&qlen);
		if (status)
			return status;

		/* Stats accumulate */
		np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
		np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
		np->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
		np->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
			s.n_bytes_dropped;
		np->stats.leaf.n_pkts_queued = qlen;
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &np->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&np->stats, 0, sizeof(np->stats));

	return 0;
}
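
/* Accumulate TC-level stats: one queue for a strict priority TC, all
 * best-effort queues for the lowest priority TC.
 */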
static int
read_tc_stats(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;

	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	struct rte_sched_queue_stats s;
	uint32_t qid, i;
	uint16_t qlen;
	int status;

	/* Stats read */
	if (tc_id < RTE_SCHED_TRAFFIC_CLASS_BE) {
		qid = tm_port_queue_id(dev,
			subport_id,
			pipe_id,
			tc_id,
			0);

		status = rte_sched_queue_read_stats(SCHED(p),
			qid,
			&s,
			&qlen);
		if (status)
			return status;

		/* Stats accumulate */
		nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
		nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
		nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
		nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
			s.n_bytes_dropped;
		nt->stats.leaf.n_pkts_queued = qlen;
	} else {
		for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
			qid = tm_port_queue_id(dev,
				subport_id,
				pipe_id,
				tc_id,
				i);

			status = rte_sched_queue_read_stats(SCHED(p),
				qid,
				&s,
				&qlen);
			if (status)
				return status;

			/* Stats accumulate */
			nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
			nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
			nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
				s.n_pkts_dropped;
			nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
				s.n_bytes_dropped;
			nt->stats.leaf.n_pkts_queued = qlen;
		}
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &nt->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&nt->stats, 0, sizeof(nt->stats));

	return 0;
}
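
/* Read (and optionally clear) the counters of a single queue node. */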
static int
read_queue_stats(struct rte_eth_dev *dev,
	struct tm_node *nq,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct rte_sched_queue_stats s;
	uint16_t qlen;

	uint32_t queue_id = tm_node_queue_id(dev, nq);

	struct tm_node *nt = nq->parent_node;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	/* Stats read */
	uint32_t qid = tm_port_queue_id(dev,
		subport_id,
		pipe_id,
		tc_id,
		queue_id);

	int status = rte_sched_queue_read_stats(SCHED(p),
		qid,
		&s,
		&qlen);
	if (status)
		return status;

	/* Stats accumulate */
	nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
	nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
	nq->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
	nq->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
		s.n_bytes_dropped;
	nq->stats.leaf.n_pkts_queued = qlen;

	/* Stats copy */
	if (stats)
		memcpy(stats, &nq->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_QUEUE;

	/* Stats clear */
	if (clear)
		memset(&nq->stats, 0, sizeof(nq->stats));

	return 0;
}

/* Traffic manager read stats counters for specific node */
static int
pmd_tm_node_stats_read(struct rte_eth_dev *dev,
	uint32_t node_id,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear,
	struct rte_tm_error *error)
{
	struct tm_node *n;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		if (read_port_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_SUBPORT:
		if (read_subport_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_PIPE:
		if (read_pipe_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_TC:
		if (read_tc_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
		/* fall-through */
	case TM_NODE_LEVEL_QUEUE:
	default:
		if (read_queue_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	}
}
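
/* Traffic manager operations exposed through the rte_tm API; callbacks
 * left NULL are not supported by this driver.
 */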
const struct rte_tm_ops pmd_tm_ops = {
	.node_type_get = pmd_tm_node_type_get,
	.capabilities_get = pmd_tm_capabilities_get,
	.level_capabilities_get = pmd_tm_level_capabilities_get,
	.node_capabilities_get = pmd_tm_node_capabilities_get,

	.wred_profile_add = pmd_tm_wred_profile_add,
	.wred_profile_delete = pmd_tm_wred_profile_delete,
	.shared_wred_context_add_update = NULL,
	.shared_wred_context_delete = NULL,

	.shaper_profile_add = pmd_tm_shaper_profile_add,
	.shaper_profile_delete = pmd_tm_shaper_profile_delete,
	.shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
	.shared_shaper_delete = pmd_tm_shared_shaper_delete,

	.node_add = pmd_tm_node_add,
	.node_delete = pmd_tm_node_delete,
	.node_suspend = NULL,
	.node_resume = NULL,
	.hierarchy_commit = pmd_tm_hierarchy_commit,

	.node_parent_update = pmd_tm_node_parent_update,
	.node_shaper_update = pmd_tm_node_shaper_update,
	.node_shared_shaper_update = NULL,
	.node_stats_update = NULL,
	.node_wfq_weight_mode_update = NULL,
	.node_cman_update = NULL,
	.node_wred_context_update = NULL,
	.node_shared_wred_context_update = NULL,

	.node_stats_read = pmd_tm_node_stats_read,
};