/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_string_fns.h>

#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"

#define SUBPORT_TC_PERIOD 10
#define PIPE_TC_PERIOD 40
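
/*
 * Per the rte_sched API, tc_period is the traffic class rate enforcement
 * period, measured in milliseconds; these defaults are copied into the
 * subport and pipe profiles built further below.
 */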
softnic_tmgr_init(struct pmd_internals *p)
    TAILQ_INIT(&p->tmgr_port_list);

softnic_tmgr_free(struct pmd_internals *p)
    struct softnic_tmgr_port *tmgr_port;
    tmgr_port = TAILQ_FIRST(&p->tmgr_port_list);
    if (tmgr_port == NULL)
    TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
    rte_sched_port_free(tmgr_port->s);

struct softnic_tmgr_port *
softnic_tmgr_port_find(struct pmd_internals *p,
    struct softnic_tmgr_port *tmgr_port;
    TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node)
        if (strcmp(tmgr_port->name, name) == 0)

struct softnic_tmgr_port *
softnic_tmgr_port_create(struct pmd_internals *p,
    struct softnic_tmgr_port *tmgr_port;
    struct tm_params *t = &p->soft.tm.params;
    struct rte_sched_port *sched;
    uint32_t n_subports, subport_id;

    /* Check input params */
    softnic_tmgr_port_find(p, name))

    /* Is hierarchy frozen? */
    if (p->soft.tm.hierarchy_frozen == 0)

    sched = rte_sched_port_config(&t->port_params);

    n_subports = t->port_params.n_subports_per_port;
    for (subport_id = 0; subport_id < n_subports; subport_id++) {
        uint32_t n_pipes_per_subport =
            t->subport_params[subport_id].n_pipes_per_subport_enabled;

        status = rte_sched_subport_config(sched,
            &t->subport_params[subport_id],
            t->subport_to_profile[subport_id]);
            rte_sched_port_free(sched);

        for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
            int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id;
            int profile_id = t->pipe_to_profile[pos];

            status = rte_sched_pipe_config(sched,
                rte_sched_port_free(sched);

    /* Node allocation */
    tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port));
    if (tmgr_port == NULL) {
        rte_sched_port_free(sched);

    strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
    tmgr_port->s = sched;

    /* Node add to list */
    TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node);
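
/*
 * Summary of the creation path above: the scheduler can only be
 * instantiated after the TM hierarchy has been frozen (committed); the
 * generated port, subport and pipe profile tables are then pushed into
 * rte_sched, and the resulting port is registered under 'name' for later
 * lookup.
 */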
static struct rte_sched_port *
SCHED(struct pmd_internals *p)
    struct softnic_tmgr_port *tmgr_port;
    tmgr_port = softnic_tmgr_port_find(p, "TMGR");
    if (tmgr_port == NULL)
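
    /*
     * The run-time update helpers below assume the traffic manager port
     * was registered under the fixed name "TMGR"; SCHED() resolves that
     * instance to its underlying struct rte_sched_port.
     */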
tm_hierarchy_init(struct pmd_internals *p)
    memset(&p->soft.tm, 0, sizeof(p->soft.tm));

    /* Initialize shaper profile list */
    TAILQ_INIT(&p->soft.tm.h.shaper_profiles);

    /* Initialize shared shaper list */
    TAILQ_INIT(&p->soft.tm.h.shared_shapers);

    /* Initialize WRED profile list */
    TAILQ_INIT(&p->soft.tm.h.wred_profiles);

    /* Initialize TM node list */
    TAILQ_INIT(&p->soft.tm.h.nodes);

tm_hierarchy_free(struct pmd_internals *p)
    /* Remove all nodes */
    struct tm_node *tm_node;
    tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
    TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);

    /* Remove all WRED profiles */
    struct tm_wred_profile *wred_profile;
    wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
    if (wred_profile == NULL)
    TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);

    /* Remove all shared shapers */
    struct tm_shared_shaper *shared_shaper;
    shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
    if (shared_shaper == NULL)
    TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);

    /* Remove all shaper profiles */
    struct tm_shaper_profile *shaper_profile;
    shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
    if (shaper_profile == NULL)
    TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
        shaper_profile, node);
    free(shaper_profile);

    tm_hierarchy_init(p);
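
/*
 * Note that tm_hierarchy_free() ends by re-running tm_hierarchy_init(),
 * so after teardown the hierarchy is empty but immediately reusable.
 */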
static struct tm_shaper_profile *
tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
    struct tm_shaper_profile *sp;
    TAILQ_FOREACH(sp, spl, node)
        if (shaper_profile_id == sp->shaper_profile_id)

static struct tm_shared_shaper *
tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
    struct tm_shared_shaper *ss;
    TAILQ_FOREACH(ss, ssl, node)
        if (shared_shaper_id == ss->shared_shaper_id)

static struct tm_wred_profile *
tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
    struct tm_wred_profile *wp;
    TAILQ_FOREACH(wp, wpl, node)
        if (wred_profile_id == wp->wred_profile_id)

static struct tm_node *
tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_node_list *nl = &p->soft.tm.h.nodes;
    TAILQ_FOREACH(n, nl, node)
        if (n->node_id == node_id)

static struct tm_node *
tm_root_node_present(struct rte_eth_dev *dev)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_node_list *nl = &p->soft.tm.h.nodes;
    TAILQ_FOREACH(n, nl, node)
        if (n->parent_node_id == RTE_TM_NODE_ID_NULL)

tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_node_list *nl = &p->soft.tm.h.nodes;
    TAILQ_FOREACH(ns, nl, node) {
        if (ns->level != TM_NODE_LEVEL_SUBPORT)
        if (ns->node_id == subport_node->node_id)

tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_node_list *nl = &p->soft.tm.h.nodes;
    TAILQ_FOREACH(np, nl, node) {
        if (np->level != TM_NODE_LEVEL_PIPE ||
            np->parent_node_id != pipe_node->parent_node_id)
        if (np->node_id == pipe_node->node_id)

tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
    return tc_node->priority;

tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_node_list *nl = &p->soft.tm.h.nodes;
    TAILQ_FOREACH(nq, nl, node) {
        if (nq->level != TM_NODE_LEVEL_QUEUE ||
            nq->parent_node_id != queue_node->parent_node_id)
        if (nq->node_id == queue_node->node_id)

tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
    struct pmd_internals *p = dev->data->dev_private;
    uint32_t n_queues_max = p->params.tm.n_queues;
        (n_queues_max * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
        / RTE_SCHED_QUEUES_PER_PIPE;
    uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
    uint32_t n_subports_max = n_pipes_max;
    uint32_t n_root_max = 1;
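
    /*
     * Worked example of the arithmetic above, assuming the stock rte_sched
     * constants (RTE_SCHED_QUEUES_PER_PIPE = 16,
     * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE = 13) and n_queues_max = 4096:
     * n_tc_max = 4096 * 13 / 16 = 3328, n_pipes_max = 3328 / 13 = 256,
     * and up to 256 subports (one pipe per subport in the limit).
     */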
    case TM_NODE_LEVEL_PORT:
    case TM_NODE_LEVEL_SUBPORT:
        return n_subports_max;
    case TM_NODE_LEVEL_PIPE:
    case TM_NODE_LEVEL_TC:
    case TM_NODE_LEVEL_QUEUE:

/* Traffic manager node type get */
pmd_tm_node_type_get(struct rte_eth_dev *dev,
    struct rte_tm_error *error)
    struct pmd_internals *p = dev->data->dev_private;
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_UNSPECIFIED,
        rte_strerror(EINVAL));

    if (node_id == RTE_TM_NODE_ID_NULL ||
        (tm_node_search(dev, node_id) == NULL))
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_ID,
            rte_strerror(EINVAL));

    *is_leaf = node_id < p->params.tm.n_queues;

#ifdef RTE_SCHED_CMAN
#define WRED_SUPPORTED 1
#define WRED_SUPPORTED 0

#define STATS_MASK_DEFAULT \
    (RTE_TM_STATS_N_PKTS | \
    RTE_TM_STATS_N_BYTES | \
    RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
    RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

#define STATS_MASK_QUEUE \
    (STATS_MASK_DEFAULT | \
    RTE_TM_STATS_N_PKTS_QUEUED)
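
/*
 * STATS_MASK_DEFAULT covers the counters rte_sched keeps for every node
 * (packets/bytes forwarded and dropped for green traffic); leaf nodes add
 * RTE_TM_STATS_N_PKTS_QUEUED, which maps to the per-queue occupancy that
 * rte_sched exposes for queues only.
 */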
static const struct rte_tm_capabilities tm_cap = {
    .n_nodes_max = UINT32_MAX,
    .n_levels_max = TM_NODE_LEVEL_MAX,

    .non_leaf_nodes_identical = 0,
    .leaf_nodes_identical = 1,

    .shaper_n_max = UINT32_MAX,
    .shaper_private_n_max = UINT32_MAX,
    .shaper_private_dual_rate_n_max = 0,
    .shaper_private_rate_min = 1,
    .shaper_private_rate_max = UINT32_MAX,
    .shaper_private_packet_mode_supported = 0,
    .shaper_private_byte_mode_supported = 1,

    .shaper_shared_n_max = UINT32_MAX,
    .shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
    .shaper_shared_n_shapers_per_node_max = 1,
    .shaper_shared_dual_rate_n_max = 0,
    .shaper_shared_rate_min = 1,
    .shaper_shared_rate_max = UINT32_MAX,
    .shaper_shared_packet_mode_supported = 0,
    .shaper_shared_byte_mode_supported = 1,

    .shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
    .shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,

    .sched_n_children_max = UINT32_MAX,
    .sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
    .sched_wfq_n_children_per_group_max = UINT32_MAX,
    .sched_wfq_n_groups_max = 1,
    .sched_wfq_weight_max = UINT32_MAX,
    .sched_wfq_packet_mode_supported = 0,
    .sched_wfq_byte_mode_supported = 1,

    .cman_wred_packet_mode_supported = WRED_SUPPORTED,
    .cman_wred_byte_mode_supported = 0,
    .cman_head_drop_supported = 0,
    .cman_wred_context_n_max = 0,
    .cman_wred_context_private_n_max = 0,
    .cman_wred_context_shared_n_max = 0,
    .cman_wred_context_shared_n_nodes_per_context_max = 0,
    .cman_wred_context_shared_n_contexts_per_node_max = 0,

    .mark_vlan_dei_supported = {0, 0, 0},
    .mark_ip_ecn_tcp_supported = {0, 0, 0},
    .mark_ip_ecn_sctp_supported = {0, 0, 0},
    .mark_ip_dscp_supported = {0, 0, 0},

    .dynamic_update_mask = 0,

    .stats_mask = STATS_MASK_QUEUE,

/* Traffic manager capabilities get */
pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused,
    struct rte_tm_capabilities *cap,
    struct rte_tm_error *error)
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_CAPABILITIES,
        rte_strerror(EINVAL));

    memcpy(cap, &tm_cap, sizeof(*cap));

    cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

    cap->shaper_private_n_max =
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);

    cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);

    cap->shaper_n_max = cap->shaper_private_n_max +
        cap->shaper_shared_n_max;

    cap->shaper_shared_n_nodes_per_shaper_max =
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);

    cap->sched_n_children_max = RTE_MAX(
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
        (uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);

    cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;

    cap->cman_wred_context_private_n_max =
        tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

    cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
        cap->cman_wred_context_shared_n_max;
static const struct rte_tm_level_capabilities tm_level_cap[] = {
    [TM_NODE_LEVEL_PORT] = {
        .n_nodes_nonleaf_max = 1,
        .n_nodes_leaf_max = 0,
        .non_leaf_nodes_identical = 1,
        .leaf_nodes_identical = 0,

        .shaper_private_supported = 1,
        .shaper_private_dual_rate_supported = 0,
        .shaper_private_rate_min = 1,
        .shaper_private_rate_max = UINT32_MAX,
        .shaper_private_packet_mode_supported = 0,
        .shaper_private_byte_mode_supported = 1,
        .shaper_shared_n_max = 0,
        .shaper_shared_packet_mode_supported = 0,
        .shaper_shared_byte_mode_supported = 0,

        .sched_n_children_max = UINT32_MAX,
        .sched_sp_n_priorities_max = 1,
        .sched_wfq_n_children_per_group_max = UINT32_MAX,
        .sched_wfq_n_groups_max = 1,
        .sched_wfq_weight_max = 1,
        .sched_wfq_packet_mode_supported = 0,
        .sched_wfq_byte_mode_supported = 1,

        .stats_mask = STATS_MASK_DEFAULT,

    [TM_NODE_LEVEL_SUBPORT] = {
        .n_nodes_max = UINT32_MAX,
        .n_nodes_nonleaf_max = UINT32_MAX,
        .n_nodes_leaf_max = 0,
        .non_leaf_nodes_identical = 1,
        .leaf_nodes_identical = 0,

        .shaper_private_supported = 1,
        .shaper_private_dual_rate_supported = 0,
        .shaper_private_rate_min = 1,
        .shaper_private_rate_max = UINT32_MAX,
        .shaper_private_packet_mode_supported = 0,
        .shaper_private_byte_mode_supported = 1,
        .shaper_shared_n_max = 0,
        .shaper_shared_packet_mode_supported = 0,
        .shaper_shared_byte_mode_supported = 0,

        .sched_n_children_max = UINT32_MAX,
        .sched_sp_n_priorities_max = 1,
        .sched_wfq_n_children_per_group_max = UINT32_MAX,
        .sched_wfq_n_groups_max = 1,
        .sched_wfq_weight_max = UINT32_MAX,
        .sched_wfq_packet_mode_supported = 0,
        .sched_wfq_byte_mode_supported = 1,

        .stats_mask = STATS_MASK_DEFAULT,

    [TM_NODE_LEVEL_PIPE] = {
        .n_nodes_max = UINT32_MAX,
        .n_nodes_nonleaf_max = UINT32_MAX,
        .n_nodes_leaf_max = 0,
        .non_leaf_nodes_identical = 1,
        .leaf_nodes_identical = 0,

        .shaper_private_supported = 1,
        .shaper_private_dual_rate_supported = 0,
        .shaper_private_rate_min = 1,
        .shaper_private_rate_max = UINT32_MAX,
        .shaper_private_packet_mode_supported = 0,
        .shaper_private_byte_mode_supported = 1,
        .shaper_shared_n_max = 0,
        .shaper_shared_packet_mode_supported = 0,
        .shaper_shared_byte_mode_supported = 0,

        .sched_n_children_max =
            RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
        .sched_sp_n_priorities_max =
            RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
        .sched_wfq_n_children_per_group_max = 1,
        .sched_wfq_n_groups_max = 0,
        .sched_wfq_weight_max = 1,
        .sched_wfq_packet_mode_supported = 0,
        .sched_wfq_byte_mode_supported = 0,

        .stats_mask = STATS_MASK_DEFAULT,

    [TM_NODE_LEVEL_TC] = {
        .n_nodes_max = UINT32_MAX,
        .n_nodes_nonleaf_max = UINT32_MAX,
        .n_nodes_leaf_max = 0,
        .non_leaf_nodes_identical = 1,
        .leaf_nodes_identical = 0,

        .shaper_private_supported = 1,
        .shaper_private_dual_rate_supported = 0,
        .shaper_private_rate_min = 1,
        .shaper_private_rate_max = UINT32_MAX,
        .shaper_private_packet_mode_supported = 0,
        .shaper_private_byte_mode_supported = 1,
        .shaper_shared_n_max = 1,
        .shaper_shared_packet_mode_supported = 0,
        .shaper_shared_byte_mode_supported = 1,

        .sched_n_children_max =
            RTE_SCHED_BE_QUEUES_PER_PIPE,
        .sched_sp_n_priorities_max = 1,
        .sched_wfq_n_children_per_group_max =
            RTE_SCHED_BE_QUEUES_PER_PIPE,
        .sched_wfq_n_groups_max = 1,
        .sched_wfq_weight_max = UINT32_MAX,
        .sched_wfq_packet_mode_supported = 0,
        .sched_wfq_byte_mode_supported = 1,

        .stats_mask = STATS_MASK_DEFAULT,

    [TM_NODE_LEVEL_QUEUE] = {
        .n_nodes_max = UINT32_MAX,
        .n_nodes_nonleaf_max = 0,
        .n_nodes_leaf_max = UINT32_MAX,
        .non_leaf_nodes_identical = 0,
        .leaf_nodes_identical = 1,

        .shaper_private_supported = 0,
        .shaper_private_dual_rate_supported = 0,
        .shaper_private_rate_min = 0,
        .shaper_private_rate_max = 0,
        .shaper_private_packet_mode_supported = 0,
        .shaper_private_byte_mode_supported = 0,
        .shaper_shared_n_max = 0,
        .shaper_shared_packet_mode_supported = 0,
        .shaper_shared_byte_mode_supported = 0,

        .cman_head_drop_supported = 0,
        .cman_wred_packet_mode_supported = WRED_SUPPORTED,
        .cman_wred_byte_mode_supported = 0,
        .cman_wred_context_private_supported = WRED_SUPPORTED,
        .cman_wred_context_shared_n_max = 0,

        .stats_mask = STATS_MASK_QUEUE,
/* Traffic manager level capabilities get */
pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused,
    struct rte_tm_level_capabilities *cap,
    struct rte_tm_error *error)
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_CAPABILITIES,
        rte_strerror(EINVAL));

    if (level_id >= TM_NODE_LEVEL_MAX)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_LEVEL_ID,
            rte_strerror(EINVAL));

    memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));

    case TM_NODE_LEVEL_PORT:
        cap->nonleaf.sched_n_children_max =
            tm_level_get_max_nodes(dev,
                TM_NODE_LEVEL_SUBPORT);
        cap->nonleaf.sched_wfq_n_children_per_group_max =
            cap->nonleaf.sched_n_children_max;

    case TM_NODE_LEVEL_SUBPORT:
        cap->n_nodes_max = tm_level_get_max_nodes(dev,
            TM_NODE_LEVEL_SUBPORT);
        cap->n_nodes_nonleaf_max = cap->n_nodes_max;
        cap->nonleaf.sched_n_children_max =
            tm_level_get_max_nodes(dev,
        cap->nonleaf.sched_wfq_n_children_per_group_max =
            cap->nonleaf.sched_n_children_max;

    case TM_NODE_LEVEL_PIPE:
        cap->n_nodes_max = tm_level_get_max_nodes(dev,
        cap->n_nodes_nonleaf_max = cap->n_nodes_max;

    case TM_NODE_LEVEL_TC:
        cap->n_nodes_max = tm_level_get_max_nodes(dev,
        cap->n_nodes_nonleaf_max = cap->n_nodes_max;

    case TM_NODE_LEVEL_QUEUE:
        cap->n_nodes_max = tm_level_get_max_nodes(dev,
            TM_NODE_LEVEL_QUEUE);
        cap->n_nodes_leaf_max = cap->n_nodes_max;
static const struct rte_tm_node_capabilities tm_node_cap[] = {
    [TM_NODE_LEVEL_PORT] = {
        .shaper_private_supported = 1,
        .shaper_private_dual_rate_supported = 0,
        .shaper_private_rate_min = 1,
        .shaper_private_rate_max = UINT32_MAX,
        .shaper_private_packet_mode_supported = 0,
        .shaper_private_byte_mode_supported = 1,
        .shaper_shared_n_max = 0,
        .shaper_shared_packet_mode_supported = 0,
        .shaper_shared_byte_mode_supported = 0,

        .sched_n_children_max = UINT32_MAX,
        .sched_sp_n_priorities_max = 1,
        .sched_wfq_n_children_per_group_max = UINT32_MAX,
        .sched_wfq_n_groups_max = 1,
        .sched_wfq_weight_max = 1,
        .sched_wfq_packet_mode_supported = 0,
        .sched_wfq_byte_mode_supported = 1,

        .stats_mask = STATS_MASK_DEFAULT,

    [TM_NODE_LEVEL_SUBPORT] = {
        .shaper_private_supported = 1,
        .shaper_private_dual_rate_supported = 0,
        .shaper_private_rate_min = 1,
        .shaper_private_rate_max = UINT32_MAX,
        .shaper_private_packet_mode_supported = 0,
        .shaper_private_byte_mode_supported = 1,
        .shaper_shared_n_max = 0,
        .shaper_shared_packet_mode_supported = 0,
        .shaper_shared_byte_mode_supported = 0,

        .sched_n_children_max = UINT32_MAX,
        .sched_sp_n_priorities_max = 1,
        .sched_wfq_n_children_per_group_max = UINT32_MAX,
        .sched_wfq_n_groups_max = 1,
        .sched_wfq_weight_max = UINT32_MAX,
        .sched_wfq_packet_mode_supported = 0,
        .sched_wfq_byte_mode_supported = 1,

        .stats_mask = STATS_MASK_DEFAULT,

    [TM_NODE_LEVEL_PIPE] = {
        .shaper_private_supported = 1,
        .shaper_private_dual_rate_supported = 0,
        .shaper_private_rate_min = 1,
        .shaper_private_rate_max = UINT32_MAX,
        .shaper_private_packet_mode_supported = 0,
        .shaper_private_byte_mode_supported = 1,
        .shaper_shared_n_max = 0,
        .shaper_shared_packet_mode_supported = 0,
        .shaper_shared_byte_mode_supported = 0,

        .sched_n_children_max =
            RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
        .sched_sp_n_priorities_max =
            RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
        .sched_wfq_n_children_per_group_max = 1,
        .sched_wfq_n_groups_max = 0,
        .sched_wfq_weight_max = 1,
        .sched_wfq_packet_mode_supported = 0,
        .sched_wfq_byte_mode_supported = 0,

        .stats_mask = STATS_MASK_DEFAULT,

    [TM_NODE_LEVEL_TC] = {
        .shaper_private_supported = 1,
        .shaper_private_dual_rate_supported = 0,
        .shaper_private_rate_min = 1,
        .shaper_private_rate_max = UINT32_MAX,
        .shaper_private_packet_mode_supported = 0,
        .shaper_private_byte_mode_supported = 1,
        .shaper_shared_n_max = 1,
        .shaper_shared_packet_mode_supported = 0,
        .shaper_shared_byte_mode_supported = 1,

        .sched_n_children_max =
            RTE_SCHED_BE_QUEUES_PER_PIPE,
        .sched_sp_n_priorities_max = 1,
        .sched_wfq_n_children_per_group_max =
            RTE_SCHED_BE_QUEUES_PER_PIPE,
        .sched_wfq_n_groups_max = 1,
        .sched_wfq_weight_max = UINT32_MAX,
        .sched_wfq_packet_mode_supported = 0,
        .sched_wfq_byte_mode_supported = 1,

        .stats_mask = STATS_MASK_DEFAULT,

    [TM_NODE_LEVEL_QUEUE] = {
        .shaper_private_supported = 0,
        .shaper_private_dual_rate_supported = 0,
        .shaper_private_rate_min = 0,
        .shaper_private_rate_max = 0,
        .shaper_private_packet_mode_supported = 0,
        .shaper_private_byte_mode_supported = 0,
        .shaper_shared_n_max = 0,
        .shaper_shared_packet_mode_supported = 0,
        .shaper_shared_byte_mode_supported = 0,

        .cman_head_drop_supported = 0,
        .cman_wred_packet_mode_supported = WRED_SUPPORTED,
        .cman_wred_byte_mode_supported = 0,
        .cman_wred_context_private_supported = WRED_SUPPORTED,
        .cman_wred_context_shared_n_max = 0,

        .stats_mask = STATS_MASK_QUEUE,
/* Traffic manager node capabilities get */
pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused,
    struct rte_tm_node_capabilities *cap,
    struct rte_tm_error *error)
    struct tm_node *tm_node;
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_CAPABILITIES,
        rte_strerror(EINVAL));

    tm_node = tm_node_search(dev, node_id);
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_NODE_ID,
        rte_strerror(EINVAL));

    memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));

    switch (tm_node->level) {
    case TM_NODE_LEVEL_PORT:
        cap->nonleaf.sched_n_children_max =
            tm_level_get_max_nodes(dev,
                TM_NODE_LEVEL_SUBPORT);
        cap->nonleaf.sched_wfq_n_children_per_group_max =
            cap->nonleaf.sched_n_children_max;

    case TM_NODE_LEVEL_SUBPORT:
        cap->nonleaf.sched_n_children_max =
            tm_level_get_max_nodes(dev,
        cap->nonleaf.sched_wfq_n_children_per_group_max =
            cap->nonleaf.sched_n_children_max;

    case TM_NODE_LEVEL_PIPE:
    case TM_NODE_LEVEL_TC:
    case TM_NODE_LEVEL_QUEUE:
shaper_profile_check(struct rte_eth_dev *dev,
    uint32_t shaper_profile_id,
    struct rte_tm_shaper_params *profile,
    struct rte_tm_error *error)
    struct tm_shaper_profile *sp;

    /* Shaper profile ID must not be NONE. */
    if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
            rte_strerror(EINVAL));

    /* Shaper profile must not exist. */
    sp = tm_shaper_profile_search(dev, shaper_profile_id);
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
        rte_strerror(EEXIST));

    /* Profile must not be NULL. */
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
        rte_strerror(EINVAL));

    /* Peak rate: non-zero, 32-bit */
    if (profile->peak.rate == 0 ||
        profile->peak.rate >= UINT32_MAX)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
            rte_strerror(EINVAL));

    /* Peak size: non-zero, 32-bit */
    if (profile->peak.size == 0 ||
        profile->peak.size >= UINT32_MAX)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
            rte_strerror(EINVAL));

    /* Dual-rate profiles are not supported. */
    if (profile->committed.rate != 0)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
            rte_strerror(EINVAL));

    /* Packet length adjust: 24 bytes */
    if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
            rte_strerror(EINVAL));

    /* Packet mode is not supported. */
    if (profile->packet_mode != 0)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE,
            rte_strerror(EINVAL));
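
/*
 * Illustrative profile that passes shaper_profile_check() (the values are
 * an example, not taken from this driver): single rate, byte mode, with
 * the mandatory 24-byte framing overhead adjustment. A peak rate of
 * 125000000 bytes/s corresponds to a 1 Gbps line.
 *
 *    struct rte_tm_shaper_params sp = {
 *        .peak = { .rate = 125000000, .size = 1000000 },
 *        .pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
 *        .packet_mode = 0,
 *    };
 */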
/* Traffic manager shaper profile add */
pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
    uint32_t shaper_profile_id,
    struct rte_tm_shaper_params *profile,
    struct rte_tm_error *error)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
    struct tm_shaper_profile *sp;

    /* Check input params */
    status = shaper_profile_check(dev, shaper_profile_id, profile, error);

    /* Memory allocation */
    sp = calloc(1, sizeof(struct tm_shaper_profile));
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_UNSPECIFIED,
        rte_strerror(ENOMEM));

    sp->shaper_profile_id = shaper_profile_id;
    memcpy(&sp->params, profile, sizeof(sp->params));

    TAILQ_INSERT_TAIL(spl, sp, node);
    p->soft.tm.h.n_shaper_profiles++;

/* Traffic manager shaper profile delete */
pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
    uint32_t shaper_profile_id,
    struct rte_tm_error *error)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_shaper_profile *sp;

    /* Check existing */
    sp = tm_shaper_profile_search(dev, shaper_profile_id);
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
        rte_strerror(EINVAL));
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
        rte_strerror(EBUSY));

    /* Remove from list */
    TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
    p->soft.tm.h.n_shaper_profiles--;

static struct tm_node *
tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
    struct tm_shared_shaper *ss)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_node_list *nl = &p->soft.tm.h.nodes;
    /* Find the first TC node that uses this shared shaper. */
    TAILQ_FOREACH(n, nl, node) {
        if (n->level != TM_NODE_LEVEL_TC ||
            n->params.n_shared_shapers == 0 ||
            n->params.shared_shaper_id[0] != ss->shared_shaper_id)

subport_profile_exists(struct rte_eth_dev *dev,
    struct rte_sched_subport_profile_params *sp,
    uint32_t *subport_profile_id)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_params *t = &p->soft.tm.params;

    for (i = 0; i < t->n_subport_profiles; i++)
        if (memcmp(&t->subport_profile[i], sp, sizeof(*sp)) == 0) {
            if (subport_profile_id)
                *subport_profile_id = i;

update_subport_tc_rate(struct rte_eth_dev *dev,
    struct tm_shared_shaper *ss,
    struct tm_shaper_profile *sp_new)
    struct rte_sched_subport_profile_params subport_profile;
    struct pmd_internals *p = dev->data->dev_private;
    uint32_t tc_id = tm_node_tc_id(dev, nt);
    struct tm_node *np = nt->parent_node;
    struct tm_node *ns = np->parent_node;
    uint32_t subport_id = tm_node_subport_id(dev, ns);
    struct tm_params *t = &p->soft.tm.params;
    uint32_t subport_profile_id;
    struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
        ss->shaper_profile_id);

    if (subport_id >= TM_MAX_SUBPORT_PROFILE)

    subport_profile_id = t->subport_to_profile[subport_id];

    /* Derive new subport configuration. */
    memcpy(&subport_profile,
        &p->soft.tm.params.subport_profile[subport_profile_id],
        sizeof(subport_profile));
    subport_profile.tc_rate[tc_id] = sp_new->params.peak.rate;

    /* Update the subport configuration. */
    if (rte_sched_subport_config(SCHED(p),
        subport_id, NULL, subport_profile_id))

    /* Commit changes. */
    ss->shaper_profile_id = sp_new->shaper_profile_id;
    memcpy(&p->soft.tm.params.subport_profile[subport_profile_id],
        sizeof(subport_profile));
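
/*
 * Commit order for the run-time TC rate update above: the live scheduler
 * is reconfigured first via rte_sched_subport_config(), and only if that
 * succeeds are the shared shaper's profile reference and the cached
 * subport profile updated to match.
 */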
/* Traffic manager shared shaper add/update */
pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
    uint32_t shared_shaper_id,
    uint32_t shaper_profile_id,
    struct rte_tm_error *error)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_shared_shaper *ss;
    struct tm_shaper_profile *sp;

    /* Shaper profile must be valid. */
    sp = tm_shaper_profile_search(dev, shaper_profile_id);
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
        rte_strerror(EINVAL));

     * Add new shared shaper
    ss = tm_shared_shaper_search(dev, shared_shaper_id);
        struct tm_shared_shaper_list *ssl =
            &p->soft.tm.h.shared_shapers;

        /* Hierarchy must not be frozen */
        if (p->soft.tm.hierarchy_frozen)
            return -rte_tm_error_set(error,
                RTE_TM_ERROR_TYPE_UNSPECIFIED,
                rte_strerror(EBUSY));

        /* Memory allocation */
        ss = calloc(1, sizeof(struct tm_shared_shaper));
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_UNSPECIFIED,
            rte_strerror(ENOMEM));

        ss->shared_shaper_id = shared_shaper_id;
        ss->shaper_profile_id = shaper_profile_id;

        TAILQ_INSERT_TAIL(ssl, ss, node);
        p->soft.tm.h.n_shared_shapers++;

     * Update existing shared shaper
    /* Hierarchy must be frozen (run-time update) */
    if (p->soft.tm.hierarchy_frozen == 0)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_UNSPECIFIED,
            rte_strerror(EBUSY));

    /* Propagate change. */
    nt = tm_shared_shaper_get_tc(dev, ss);
    if (update_subport_tc_rate(dev, nt, ss, sp))
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_UNSPECIFIED,
            rte_strerror(EINVAL));

/* Traffic manager shared shaper delete */
pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
    uint32_t shared_shaper_id,
    struct rte_tm_error *error)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_shared_shaper *ss;

    /* Check existing */
    ss = tm_shared_shaper_search(dev, shared_shaper_id);
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
        rte_strerror(EINVAL));
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
        rte_strerror(EBUSY));

    /* Remove from list */
    TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
    p->soft.tm.h.n_shared_shapers--;

wred_profile_check(struct rte_eth_dev *dev,
    uint32_t wred_profile_id,
    struct rte_tm_wred_params *profile,
    struct rte_tm_error *error)
    struct tm_wred_profile *wp;
    enum rte_color color;

    /* WRED profile ID must not be NONE. */
    if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
            rte_strerror(EINVAL));

    /* WRED profile must not exist. */
    wp = tm_wred_profile_search(dev, wred_profile_id);
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
        rte_strerror(EEXIST));

    /* Profile must not be NULL. */
    if (profile == NULL)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_WRED_PROFILE,
            rte_strerror(EINVAL));
    /* WRED profile must be in packet mode */
    if (profile->packet_mode == 0)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_WRED_PROFILE,
            rte_strerror(ENOTSUP));

    /* min_th <= max_th, max_th > 0 */
    for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
        uint32_t min_th = profile->red_params[color].min_th;
        uint32_t max_th = profile->red_params[color].max_th;

        if (min_th > max_th ||
            min_th > UINT16_MAX ||
            max_th > UINT16_MAX)
            return -rte_tm_error_set(error,
                RTE_TM_ERROR_TYPE_WRED_PROFILE,
                rte_strerror(EINVAL));

/* Traffic manager WRED profile add */
pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
    uint32_t wred_profile_id,
    struct rte_tm_wred_params *profile,
    struct rte_tm_error *error)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
    struct tm_wred_profile *wp;

    /* Check input params */
    status = wred_profile_check(dev, wred_profile_id, profile, error);

    /* Memory allocation */
    wp = calloc(1, sizeof(struct tm_wred_profile));
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_UNSPECIFIED,
        rte_strerror(ENOMEM));

    wp->wred_profile_id = wred_profile_id;
    memcpy(&wp->params, profile, sizeof(wp->params));

    TAILQ_INSERT_TAIL(wpl, wp, node);
    p->soft.tm.h.n_wred_profiles++;

/* Traffic manager WRED profile delete */
pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
    uint32_t wred_profile_id,
    struct rte_tm_error *error)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_wred_profile *wp;

    /* Check existing */
    wp = tm_wred_profile_search(dev, wred_profile_id);
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
        rte_strerror(EINVAL));
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
        rte_strerror(EBUSY));

    /* Remove from list */
    TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
    p->soft.tm.h.n_wred_profiles--;

node_add_check_port(struct rte_eth_dev *dev,
    uint32_t parent_node_id __rte_unused,
    uint32_t level_id __rte_unused,
    struct rte_tm_node_params *params,
    struct rte_tm_error *error)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
        params->shaper_profile_id);

    /* node type: non-leaf */
    if (node_id < p->params.tm.n_queues)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_ID,
            rte_strerror(EINVAL));

    /* Priority must be 0 */
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PRIORITY,
            rte_strerror(EINVAL));

    /* Weight must be 1 */
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_WEIGHT,
            rte_strerror(EINVAL));

    /* Shaper must be valid */
    if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
            rte_strerror(EINVAL));

    /* No shared shapers */
    if (params->n_shared_shapers != 0)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
            rte_strerror(EINVAL));

    /* Number of SP priorities must be 1 */
    if (params->nonleaf.n_sp_priorities != 1)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
            rte_strerror(EINVAL));

    if (params->stats_mask & ~STATS_MASK_DEFAULT)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
            rte_strerror(EINVAL));
node_add_check_subport(struct rte_eth_dev *dev,
    uint32_t parent_node_id __rte_unused,
    uint32_t level_id __rte_unused,
    struct rte_tm_node_params *params,
    struct rte_tm_error *error)
    struct pmd_internals *p = dev->data->dev_private;

    /* node type: non-leaf */
    if (node_id < p->params.tm.n_queues)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_ID,
            rte_strerror(EINVAL));

    /* Priority must be 0 */
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PRIORITY,
            rte_strerror(EINVAL));

    /* Weight must be 1 */
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_WEIGHT,
            rte_strerror(EINVAL));

    /* Shaper must be valid */
    if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
        (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
            rte_strerror(EINVAL));

    /* No shared shapers */
    if (params->n_shared_shapers != 0)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
            rte_strerror(EINVAL));

    /* Number of SP priorities must be 1 */
    if (params->nonleaf.n_sp_priorities != 1)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
            rte_strerror(EINVAL));

    if (params->stats_mask & ~STATS_MASK_DEFAULT)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
            rte_strerror(EINVAL));

node_add_check_pipe(struct rte_eth_dev *dev,
    uint32_t parent_node_id __rte_unused,
    uint32_t weight __rte_unused,
    uint32_t level_id __rte_unused,
    struct rte_tm_node_params *params,
    struct rte_tm_error *error)
    struct pmd_internals *p = dev->data->dev_private;

    /* node type: non-leaf */
    if (node_id < p->params.tm.n_queues)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_ID,
            rte_strerror(EINVAL));

    /* Priority must be 0 */
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PRIORITY,
            rte_strerror(EINVAL));

    /* Shaper must be valid */
    if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
        (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
            rte_strerror(EINVAL));

    /* No shared shapers */
    if (params->n_shared_shapers != 0)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
            rte_strerror(EINVAL));
    /* Number of SP priorities must be RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE */
    if (params->nonleaf.n_sp_priorities !=
        RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
            rte_strerror(EINVAL));

    /* WFQ mode must be byte mode */
    if (params->nonleaf.wfq_weight_mode != NULL &&
        params->nonleaf.wfq_weight_mode[0] != 0 &&
        params->nonleaf.wfq_weight_mode[1] != 0 &&
        params->nonleaf.wfq_weight_mode[2] != 0 &&
        params->nonleaf.wfq_weight_mode[3] != 0)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
            rte_strerror(EINVAL));

    if (params->stats_mask & ~STATS_MASK_DEFAULT)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
            rte_strerror(EINVAL));

node_add_check_tc(struct rte_eth_dev *dev,
    uint32_t parent_node_id __rte_unused,
    uint32_t priority __rte_unused,
    uint32_t level_id __rte_unused,
    struct rte_tm_node_params *params,
    struct rte_tm_error *error)
    struct pmd_internals *p = dev->data->dev_private;

    /* node type: non-leaf */
    if (node_id < p->params.tm.n_queues)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_ID,
            rte_strerror(EINVAL));

    /* Weight must be 1 */
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_WEIGHT,
            rte_strerror(EINVAL));

    /* Shaper must be valid */
    if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
        (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
            rte_strerror(EINVAL));

    /* Single valid shared shaper */
    if (params->n_shared_shapers > 1)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
            rte_strerror(EINVAL));

    if (params->n_shared_shapers == 1 &&
        (params->shared_shaper_id == NULL ||
        (!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
            rte_strerror(EINVAL));

    /* Number of priorities must be 1 */
    if (params->nonleaf.n_sp_priorities != 1)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
            rte_strerror(EINVAL));

    if (params->stats_mask & ~STATS_MASK_DEFAULT)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
            rte_strerror(EINVAL));
node_add_check_queue(struct rte_eth_dev *dev,
    uint32_t parent_node_id __rte_unused,
    uint32_t weight __rte_unused,
    uint32_t level_id __rte_unused,
    struct rte_tm_node_params *params,
    struct rte_tm_error *error)
    struct pmd_internals *p = dev->data->dev_private;

    /* node type: leaf */
    if (node_id >= p->params.tm.n_queues)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_ID,
            rte_strerror(EINVAL));

    /* Priority must be 0 */
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PRIORITY,
            rte_strerror(EINVAL));

    if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
            rte_strerror(EINVAL));

    /* No shared shapers */
    if (params->n_shared_shapers != 0)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
            rte_strerror(EINVAL));

    /* Congestion management must not be head drop */
    if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
            rte_strerror(EINVAL));

    /* Congestion management set to WRED */
    if (params->leaf.cman == RTE_TM_CMAN_WRED) {
        uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
        struct tm_wred_profile *wp = tm_wred_profile_search(dev,

        /* WRED profile (for private WRED context) must be valid */
        if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
            return -rte_tm_error_set(error,
                RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
                rte_strerror(EINVAL));

        /* No shared WRED contexts */
        if (params->leaf.wred.n_shared_wred_contexts != 0)
            return -rte_tm_error_set(error,
                RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
                rte_strerror(EINVAL));

    if (params->stats_mask & ~STATS_MASK_QUEUE)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
            rte_strerror(EINVAL));
node_add_check(struct rte_eth_dev *dev,
    uint32_t parent_node_id,
    struct rte_tm_node_params *params,
    struct rte_tm_error *error)

    /* node_id, parent_node_id:
     *    -node_id must not be RTE_TM_NODE_ID_NULL
     *    -node_id must not be in use
     *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
     *        -root node must not exist
     *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
     *        -parent_node_id must be valid
     */
    if (node_id == RTE_TM_NODE_ID_NULL)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_ID,
            rte_strerror(EINVAL));

    if (tm_node_search(dev, node_id))
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_ID,
            rte_strerror(EEXIST));

    if (parent_node_id == RTE_TM_NODE_ID_NULL) {
        if (tm_root_node_present(dev))
            return -rte_tm_error_set(error,
                RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
                rte_strerror(EEXIST));
        pn = tm_node_search(dev, parent_node_id);
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
            rte_strerror(EINVAL));
    /* priority: must be 0 .. (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) */
    if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PRIORITY,
            rte_strerror(EINVAL));
    /* weight: must be 1 .. 254 (non-zero, strictly below UINT8_MAX) */
    if (weight == 0 || weight >= UINT8_MAX)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_WEIGHT,
            rte_strerror(EINVAL));

    /* level_id: if valid, then
     *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
     *        -level_id must be zero
     *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
     *        -level_id must be parent level ID plus one
     */
    level = (pn == NULL) ? 0 : pn->level + 1;
    if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_LEVEL_ID,
            rte_strerror(EINVAL));

    /* params: must not be NULL */
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_NODE_PARAMS,
            rte_strerror(EINVAL));

    /* params: per level checks */
    case TM_NODE_LEVEL_PORT:
        status = node_add_check_port(dev, node_id,
            parent_node_id, priority, weight, level_id,

    case TM_NODE_LEVEL_SUBPORT:
        status = node_add_check_subport(dev, node_id,
            parent_node_id, priority, weight, level_id,

    case TM_NODE_LEVEL_PIPE:
        status = node_add_check_pipe(dev, node_id,
            parent_node_id, priority, weight, level_id,

    case TM_NODE_LEVEL_TC:
        status = node_add_check_tc(dev, node_id,
            parent_node_id, priority, weight, level_id,

    case TM_NODE_LEVEL_QUEUE:
        status = node_add_check_queue(dev, node_id,
            parent_node_id, priority, weight, level_id,

        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_LEVEL_ID,
            rte_strerror(EINVAL));

/* Traffic manager node add */
pmd_tm_node_add(struct rte_eth_dev *dev,
    uint32_t parent_node_id,
    struct rte_tm_node_params *params,
    struct rte_tm_error *error)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_node_list *nl = &p->soft.tm.h.nodes;

    if (p->soft.tm.hierarchy_frozen)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_UNSPECIFIED,
            rte_strerror(EBUSY));

    status = node_add_check(dev, node_id, parent_node_id, priority, weight,
        level_id, params, error);

    /* Memory allocation */
    n = calloc(1, sizeof(struct tm_node));
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_UNSPECIFIED,
        rte_strerror(ENOMEM));

    n->node_id = node_id;
    n->parent_node_id = parent_node_id;
    n->priority = priority;

    if (parent_node_id != RTE_TM_NODE_ID_NULL) {
        n->parent_node = tm_node_search(dev, parent_node_id);
        n->level = n->parent_node->level + 1;

    if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
        n->shaper_profile = tm_shaper_profile_search(dev,
            params->shaper_profile_id);

    if (n->level == TM_NODE_LEVEL_QUEUE &&
        params->leaf.cman == RTE_TM_CMAN_WRED)
        n->wred_profile = tm_wred_profile_search(dev,
            params->leaf.wred.wred_profile_id);

    memcpy(&n->params, params, sizeof(n->params));

    TAILQ_INSERT_TAIL(nl, n, node);
    p->soft.tm.h.n_nodes++;

    /* Update dependencies */
    n->parent_node->n_children++;

    if (n->shaper_profile)
        n->shaper_profile->n_users++;

    for (i = 0; i < params->n_shared_shapers; i++) {
        struct tm_shared_shaper *ss;

        ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);

    if (n->wred_profile)
        n->wred_profile->n_users++;

    p->soft.tm.h.n_tm_nodes[n->level]++;
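
/*
 * Typical caller sequence (a sketch of the generic rte_tm API, not code
 * from this file): register shaper/WRED profiles first, then build the
 * tree top-down with rte_tm_node_add() (port, subports, pipes, TCs,
 * queues), and finally call rte_tm_hierarchy_commit(), which freezes the
 * hierarchy and enables the run-time paths guarded by hierarchy_frozen.
 */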
/* Traffic manager node delete */
pmd_tm_node_delete(struct rte_eth_dev *dev,
    struct rte_tm_error *error)
    struct pmd_internals *p = dev->data->dev_private;
    /* Check that hierarchy changes are currently allowed */
    if (p->soft.tm.hierarchy_frozen)
        return -rte_tm_error_set(error,
            RTE_TM_ERROR_TYPE_UNSPECIFIED,
            rte_strerror(EBUSY));

    /* Check existing */
    n = tm_node_search(dev, node_id);
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_NODE_ID,
        rte_strerror(EINVAL));
    return -rte_tm_error_set(error,
        RTE_TM_ERROR_TYPE_NODE_ID,
        rte_strerror(EBUSY));

    /* Update dependencies */
    p->soft.tm.h.n_tm_nodes[n->level]--;

    if (n->wred_profile)
        n->wred_profile->n_users--;

    for (i = 0; i < n->params.n_shared_shapers; i++) {
        struct tm_shared_shaper *ss;

        ss = tm_shared_shaper_search(dev,
            n->params.shared_shaper_id[i]);

    if (n->shaper_profile)
        n->shaper_profile->n_users--;

    n->parent_node->n_children--;

    /* Remove from list */
    TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
    p->soft.tm.h.n_nodes--;
pipe_profile_build(struct rte_eth_dev *dev,
    struct rte_sched_pipe_params *pp)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_hierarchy *h = &p->soft.tm.h;
    struct tm_node_list *nl = &h->nodes;
    struct tm_node *nt, *nq;

    memset(pp, 0, sizeof(*pp));

    pp->tb_rate = np->shaper_profile->params.peak.rate;
    pp->tb_size = np->shaper_profile->params.peak.size;

    /* Traffic Class (TC) */
    pp->tc_period = PIPE_TC_PERIOD;
    pp->tc_ov_weight = np->weight;

    TAILQ_FOREACH(nt, nl, node) {
        uint32_t queue_id = 0;

        if (nt->level != TM_NODE_LEVEL_TC ||
            nt->parent_node_id != np->node_id)

        pp->tc_rate[nt->priority] =
            nt->shaper_profile->params.peak.rate;

        TAILQ_FOREACH(nq, nl, node) {
            if (nq->level != TM_NODE_LEVEL_QUEUE ||
                nq->parent_node_id != nt->node_id)

            if (nt->priority == RTE_SCHED_TRAFFIC_CLASS_BE)
                pp->wrr_weights[queue_id] = nq->weight;

pipe_profile_free_exists(struct rte_eth_dev *dev,
    uint32_t *pipe_profile_id)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_params *t = &p->soft.tm.params;

    if (t->n_pipe_profiles < TM_MAX_PIPE_PROFILE) {
        *pipe_profile_id = t->n_pipe_profiles;

pipe_profile_exists(struct rte_eth_dev *dev,
    struct rte_sched_pipe_params *pp,
    uint32_t *pipe_profile_id)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_params *t = &p->soft.tm.params;

    for (i = 0; i < t->n_pipe_profiles; i++)
        if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
            if (pipe_profile_id)
                *pipe_profile_id = i;

pipe_profile_install(struct rte_eth_dev *dev,
    struct rte_sched_pipe_params *pp,
    uint32_t pipe_profile_id)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_params *t = &p->soft.tm.params;

    memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
    t->n_pipe_profiles++;

pipe_profile_mark(struct rte_eth_dev *dev,
    uint32_t subport_id,
    uint32_t pipe_profile_id)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_hierarchy *h = &p->soft.tm.h;
    struct tm_params *t = &p->soft.tm.params;
    uint32_t n_pipes_per_subport, pos;

    n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
        h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
    pos = subport_id * n_pipes_per_subport + pipe_id;

    t->pipe_to_profile[pos] = pipe_profile_id;
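
/*
 * pipe_to_profile[] flattens the (subport, pipe) pair into a single
 * index: pos = subport_id * n_pipes_per_subport + pipe_id. The hierarchy
 * commit check below guarantees every subport has the same number of
 * pipes, so this stride is well defined.
 */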
static struct rte_sched_pipe_params *
pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_hierarchy *h = &p->soft.tm.h;
    struct tm_params *t = &p->soft.tm.params;
    uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
        h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

    uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
    uint32_t pipe_id = tm_node_pipe_id(dev, np);

    uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
    uint32_t pipe_profile_id = t->pipe_to_profile[pos];

    return &t->pipe_profiles[pipe_profile_id];

pipe_profiles_generate(struct rte_eth_dev *dev)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_hierarchy *h = &p->soft.tm.h;
    struct tm_node_list *nl = &h->nodes;
    struct tm_node *ns, *np;
    uint32_t subport_id;

    /* Objective: Fill in the following fields in struct tm_params:
     *    - pipe_profiles
     *    - n_pipe_profiles
     *    - pipe_to_profile
     */
    TAILQ_FOREACH(ns, nl, node) {
        if (ns->level != TM_NODE_LEVEL_SUBPORT)

        TAILQ_FOREACH(np, nl, node) {
            struct rte_sched_pipe_params pp;

            memset(&pp, 0, sizeof(pp));

            if (np->level != TM_NODE_LEVEL_PIPE ||
                np->parent_node_id != ns->node_id)

            pipe_profile_build(dev, np, &pp);

            if (!pipe_profile_exists(dev, &pp, &pos)) {
                if (!pipe_profile_free_exists(dev, &pos))

                pipe_profile_install(dev, &pp, pos);

            pipe_profile_mark(dev, subport_id, pipe_id, pos);
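
        /*
         * Profile de-duplication: each pipe's candidate profile is built,
         * then either matched byte-for-byte against an already installed
         * profile (pipe_profile_exists) or installed into the next free
         * slot, keeping the rte_sched pipe profile table minimal.
         */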
static struct tm_wred_profile *
tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_hierarchy *h = &p->soft.tm.h;
    struct tm_node_list *nl = &h->nodes;

    TAILQ_FOREACH(nq, nl, node) {
        if (nq->level != TM_NODE_LEVEL_QUEUE ||
            nq->parent_node->priority != tc_id)

        return nq->wred_profile;

#ifdef RTE_SCHED_CMAN
wred_profiles_set(struct rte_eth_dev *dev, uint32_t subport_id)
    struct pmd_internals *p = dev->data->dev_private;
    struct rte_sched_subport_params *pp =
        &p->soft.tm.params.subport_params[subport_id];
    enum rte_color color;

    for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
        for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
            struct rte_red_params *dst =
                &pp->cman_params->red_params[tc_id][color];
            struct tm_wred_profile *src_wp =
                tm_tc_wred_profile_get(dev, tc_id);
            struct rte_tm_red_params *src =
                &src_wp->params.red_params[color];

            memcpy(dst, src, sizeof(*dst));

#define wred_profiles_set(dev, subport_id)

static struct tm_shared_shaper *
tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
    return (tc_node->params.n_shared_shapers) ?
        tm_shared_shaper_search(dev,
            tc_node->params.shared_shaper_id[0]) :

static struct tm_shared_shaper *
tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
    struct tm_node *subport_node,
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_node_list *nl = &p->soft.tm.h.nodes;

    TAILQ_FOREACH(n, nl, node) {
        if (n->level != TM_NODE_LEVEL_TC ||
            n->parent_node->parent_node_id !=
                subport_node->node_id ||
            n->priority != tc_id)

        return tm_tc_shared_shaper_get(dev, n);

static struct rte_sched_subport_profile_params *
subport_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_params *t = &p->soft.tm.params;
    uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);

    if (subport_id >= TM_MAX_SUBPORT_PROFILE)

    return &t->subport_profile[subport_id];

subport_profile_mark(struct rte_eth_dev *dev,
    uint32_t subport_id,
    uint32_t subport_profile_id)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_params *t = &p->soft.tm.params;

    t->subport_to_profile[subport_id] = subport_profile_id;

subport_profile_install(struct rte_eth_dev *dev,
    struct rte_sched_subport_profile_params *sp,
    uint32_t subport_profile_id)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_params *t = &p->soft.tm.params;

    memcpy(&t->subport_profile[subport_profile_id],
    t->n_subport_profiles++;

subport_profile_free_exists(struct rte_eth_dev *dev,
    uint32_t *subport_profile_id)
    struct pmd_internals *p = dev->data->dev_private;
    struct tm_params *t = &p->soft.tm.params;

    if (t->n_subport_profiles < TM_MAX_SUBPORT_PROFILE) {
        *subport_profile_id = t->n_subport_profiles;

subport_profile_build(struct rte_eth_dev *dev, struct tm_node *np,
    struct rte_sched_subport_profile_params *sp)
    memset(sp, 0, sizeof(*sp));

    sp->tb_rate = np->shaper_profile->params.peak.rate;
    sp->tb_size = np->shaper_profile->params.peak.size;

    for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
        struct tm_shared_shaper *ss;
        struct tm_shaper_profile *ssp;

        ss = tm_subport_tc_shared_shaper_get(dev, np, i);
        ssp = (ss) ? tm_shaper_profile_search(dev,
            ss->shaper_profile_id) :
        sp->tc_rate[i] = ssp->params.peak.rate;

    /* Traffic Class (TC) */
    sp->tc_period = SUBPORT_TC_PERIOD;
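
    /*
     * For each TC that has a subport-level shared shaper, the subport-wide
     * tc_rate comes from that shaper's profile, so all pipes in the
     * subport are jointly rate-limited for that traffic class.
     */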
2443 subport_profiles_generate(struct rte_eth_dev *dev)
2445 struct pmd_internals *p = dev->data->dev_private;
2446 struct tm_hierarchy *h = &p->soft.tm.h;
2447 struct tm_node_list *nl = &h->nodes;
2449 uint32_t subport_id;
2451 /* Objective: Fill in the following fields in struct tm_params:
2452 * - subport_profiles
2453 * - n_subport_profiles
2454 * - subport_to_profile
2458 TAILQ_FOREACH(ns, nl, node) {
2459 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2462 struct rte_sched_subport_profile_params sp;
2465 memset(&sp, 0, sizeof(sp));
2467 subport_profile_build(dev, ns, &sp);
2469 if (!subport_profile_exists(dev, &sp, &pos)) {
2470 if (!subport_profile_free_exists(dev, &pos))
2473 subport_profile_install(dev, &sp, pos);
2476 subport_profile_mark(dev, subport_id, pos);
2486 hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
2488 struct pmd_internals *p = dev->data->dev_private;
2489 struct tm_hierarchy *h = &p->soft.tm.h;
2490 struct tm_node_list *nl = &h->nodes;
2491 struct tm_shared_shaper_list *ssl = &h->shared_shapers;
2492 struct tm_wred_profile_list *wpl = &h->wred_profiles;
2493 struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
2494 struct tm_shared_shaper *ss;
2496 uint32_t n_pipes_per_subport;
2498 /* Root node exists. */
2500 return -rte_tm_error_set(error,
2502 RTE_TM_ERROR_TYPE_LEVEL_ID,
2504 rte_strerror(EINVAL));
2506 /* There is at least one subport, max is not exceeded. */
2507 if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
2508 return -rte_tm_error_set(error,
2510 RTE_TM_ERROR_TYPE_LEVEL_ID,
2512 rte_strerror(EINVAL));
2514 /* There is at least one pipe. */
2515 if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
2516 return -rte_tm_error_set(error,
2518 RTE_TM_ERROR_TYPE_LEVEL_ID,
2520 rte_strerror(EINVAL));
2522 /* Number of pipes is the same for all subports. Maximum number of pipes
2523 * per subport is not exceeded.
2525 n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2526 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2528 if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
2529 return -rte_tm_error_set(error,
2531 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2533 rte_strerror(EINVAL));
2535 TAILQ_FOREACH(ns, nl, node) {
2536 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2539 if (ns->n_children != n_pipes_per_subport)
2540 return -rte_tm_error_set(error,
2542 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2544 rte_strerror(EINVAL));
2547 /* Each pipe has exactly 13 TCs, with exactly one TC for each priority */
2548 TAILQ_FOREACH(np, nl, node) {
2549 uint32_t mask = 0, mask_expected =
2550 RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
2553 if (np->level != TM_NODE_LEVEL_PIPE)
2556 if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
2557 return -rte_tm_error_set(error,
2559 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2561 rte_strerror(EINVAL));
2563 TAILQ_FOREACH(nt, nl, node) {
2564 if (nt->level != TM_NODE_LEVEL_TC ||
2565 nt->parent_node_id != np->node_id)
2568 mask |= 1 << nt->priority;
2571 if (mask != mask_expected)
2572 return -rte_tm_error_set(error,
2574 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2576 rte_strerror(EINVAL));
2579 /** Each Strict priority TC has exactly 1 packet queues while
2580 * lowest priority TC (Best-effort) has 4 queues.
2582 TAILQ_FOREACH(nt, nl, node) {
2583 if (nt->level != TM_NODE_LEVEL_TC)
2586 if (nt->n_children != 1 && nt->n_children != RTE_SCHED_BE_QUEUES_PER_PIPE)
2587 return -rte_tm_error_set(error,
2589 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2591 rte_strerror(EINVAL));
2596 * -For each TC #i, all pipes in the same subport use the same
2597 * shared shaper (or no shared shaper) for their TC#i.
2598 * -Each shared shaper needs to have at least one user. All its
2599 * users have to be TC nodes with the same priority and the same
2602 TAILQ_FOREACH(ns, nl, node) {
2603 struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2606 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2609 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
2610 s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);
2612 TAILQ_FOREACH(nt, nl, node) {
2613 struct tm_shared_shaper *subport_ss, *tc_ss;
2615 if (nt->level != TM_NODE_LEVEL_TC ||
2616 nt->parent_node->parent_node_id !=
2620 subport_ss = s[nt->priority];
2621 tc_ss = tm_tc_shared_shaper_get(dev, nt);
2623 if (subport_ss == NULL && tc_ss == NULL)
2626 if ((subport_ss == NULL && tc_ss != NULL) ||
2627 (subport_ss != NULL && tc_ss == NULL) ||
2628 subport_ss->shared_shaper_id !=
2629 tc_ss->shared_shaper_id)
2630 return -rte_tm_error_set(error,
2632 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2634 rte_strerror(EINVAL));
2638 TAILQ_FOREACH(ss, ssl, node) {
2639 struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
2640 uint32_t n_users = 0;
2643 TAILQ_FOREACH(nt, nl, node) {
2644 if (nt->level != TM_NODE_LEVEL_TC ||
2645 nt->priority != nt_any->priority ||
2646 nt->parent_node->parent_node_id !=
2647 nt_any->parent_node->parent_node_id)
2653 if (ss->n_users == 0 || ss->n_users != n_users)
2654 return -rte_tm_error_set(error,
2656 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2658 rte_strerror(EINVAL));
2661 /* Not too many subport profiles. */
2662 if (subport_profiles_generate(dev))
2663 return -rte_tm_error_set(error,
2665 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2667 rte_strerror(EINVAL));
2670 /* Not too many pipe profiles. */
2671 if (pipe_profiles_generate(dev))
2672 return -rte_tm_error_set(error,
2674 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2676 rte_strerror(EINVAL));
2679 * WRED (when used, i.e. at least one WRED profile defined):
2680 * -Each WRED profile must have at least one user.
2681 * -All leaf nodes must have their private WRED context enabled.
2682 * -For each TC #i, all leaf nodes must use the same WRED profile
2683 * for their private WRED context.
2685 if (h->n_wred_profiles) {
2686 struct tm_wred_profile *wp;
2687 struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2690 TAILQ_FOREACH(wp, wpl, node)
2691 if (wp->n_users == 0)
2692 return -rte_tm_error_set(error,
2694 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2696 rte_strerror(EINVAL));
2698 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
2699 w[id] = tm_tc_wred_profile_get(dev, id);
2702 return -rte_tm_error_set(error,
2704 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2706 rte_strerror(EINVAL));
2709 TAILQ_FOREACH(nq, nl, node) {
2712 if (nq->level != TM_NODE_LEVEL_QUEUE)
2715 id = nq->parent_node->priority;
2717 if (nq->wred_profile == NULL ||
2718 nq->wred_profile->wred_profile_id !=
2719 w[id]->wred_profile_id)
2720 return -rte_tm_error_set(error,
2722 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2724 rte_strerror(EINVAL));
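
/*
 * In short, a hierarchy passes the checks above when it has: one port root,
 * 1..TM_MAX_SUBPORTS subports with the same number of pipes each, exactly
 * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE TC nodes per pipe covering each
 * priority exactly once, one queue per strict-priority TC and
 * RTE_SCHED_BE_QUEUES_PER_PIPE queues under the best-effort TC, consistent
 * shared shaper usage across all pipes of a subport, and (if WRED is used)
 * consistent per-TC WRED profiles on all leaf queues.
 */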
static void
hierarchy_blueprints_create(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;
	struct tm_hierarchy *h = &p->soft.tm.h;

	struct tm_node_list *nl = &h->nodes;
	struct tm_node *root = tm_root_node_present(dev), *n;

	uint32_t subport_id;

	t->port_params = (struct rte_sched_port_params) {
		.name = dev->data->name,
		.socket = dev->data->numa_node,
		.rate = root->shaper_profile->params.peak.rate,
		.mtu = dev->data->mtu,
		.frame_overhead =
			root->shaper_profile->params.pkt_length_adjust,
		.n_subports_per_port = root->n_children,
		.n_subport_profiles = t->n_subport_profiles,
		.subport_profiles = t->subport_profile,
		.n_max_subport_profiles = TM_MAX_SUBPORT_PROFILE,
		.n_pipes_per_subport = TM_MAX_PIPES_PER_SUBPORT,
	};

	subport_id = 0;
	TAILQ_FOREACH(n, nl, node) {
		if (n->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		t->subport_params[subport_id] =
			(struct rte_sched_subport_params) {
				.n_pipes_per_subport_enabled =
					h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
					h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
				.qsize = {p->params.tm.qsize[0],
					p->params.tm.qsize[1],
					p->params.tm.qsize[2],
					p->params.tm.qsize[3],
					p->params.tm.qsize[4],
					p->params.tm.qsize[5],
					p->params.tm.qsize[6],
					p->params.tm.qsize[7],
					p->params.tm.qsize[8],
					p->params.tm.qsize[9],
					p->params.tm.qsize[10],
					p->params.tm.qsize[11],
					p->params.tm.qsize[12],
				},
				.pipe_profiles = t->pipe_profiles,
				.n_pipe_profiles = t->n_pipe_profiles,
				.n_max_pipe_profiles = TM_MAX_PIPE_PROFILE,
		};

		wred_profiles_set(dev, subport_id);
		subport_id++;
	}
}
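
/*
 * The blueprints filled in above are not applied immediately; they are
 * consumed later by softnic_tmgr_port_create(), which passes t->port_params,
 * t->subport_params[] and the profile mapping tables to
 * rte_sched_port_config(), rte_sched_subport_config() and
 * rte_sched_pipe_config().
 */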
/* Traffic manager hierarchy commit */
static int
pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
	int clear_on_fail,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	int status;

	/* Checks */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	status = hierarchy_commit_check(dev, error);
	if (status) {
		if (clear_on_fail)
			tm_hierarchy_free(p);
		return status;
	}

	/* Create blueprints */
	hierarchy_blueprints_create(dev);

	/* Freeze hierarchy */
	p->soft.tm.hierarchy_frozen = 1;

	return 0;
}
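
/*
 * Usage sketch (hypothetical application code built on the generic rte_tm
 * API): after staging the hierarchy with rte_tm_node_add() and friends, the
 * application triggers the callback above through the ethdev port id; the
 * second argument is the clear_on_fail flag.
 *
 *	struct rte_tm_error error = {0};
 *
 *	if (rte_tm_hierarchy_commit(port_id, 1, &error))
 *		printf("TM commit failed: %s\n",
 *			error.message ? error.message : "unknown");
 */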
static int
update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tc_ov_weight = (uint8_t)weight;

	/* Since implementation does not allow adding more pipe profiles after
	 * port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	np->weight = weight;

	return 0;
}
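
/*
 * Note: tc_ov_weight is the pipe's weight in the best-effort traffic class
 * oversubscription scheme of librte_sched, so a pipe-level weight update
 * only changes how excess best-effort bandwidth is shared between pipes.
 */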
static int
update_queue_weight(struct rte_eth_dev *dev,
	struct tm_node *nq, uint32_t weight)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t queue_id = tm_node_queue_id(dev, nq);

	struct tm_node *nt = nq->parent_node;

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	uint32_t pipe_be_queue_id =
		queue_id - RTE_SCHED_TRAFFIC_CLASS_BE;

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.wrr_weights[pipe_be_queue_id] = (uint8_t)weight;

	/* Since implementation does not allow adding more pipe profiles after
	 * port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set
	 * of pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	nq->weight = weight;

	return 0;
}
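
/*
 * Mapping note (assuming queue_id above is the pipe-level queue index,
 * which is what the subtraction requires): with RTE_SCHED_TRAFFIC_CLASS_BE
 * equal to 12 and the best-effort TC owning the last 4 queues of the pipe,
 * queue_id 14 selects WRR weight slot 14 - 12 = 2 in wrr_weights[].
 */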
/* Traffic manager node parent update */
static int
pmd_tm_node_parent_update(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	struct rte_tm_error *error)
{
	struct tm_node *n;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Parent node must be the same */
	if (n->parent_node_id != parent_node_id)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be the same */
	if (n->priority != priority)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 .. 254 (non-zero, strictly below UINT8_MAX) */
	if (weight == 0 || weight >= UINT8_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
	/* fall-through */
	case TM_NODE_LEVEL_SUBPORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
	/* fall-through */
	case TM_NODE_LEVEL_PIPE:
		if (update_pipe_weight(dev, n, weight))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	/* fall-through */
	case TM_NODE_LEVEL_TC:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
	/* fall-through */
	case TM_NODE_LEVEL_QUEUE:
	/* fall-through */
	default:
		if (update_queue_weight(dev, n, weight))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	}
}
static int
update_subport_rate(struct rte_eth_dev *dev,
	struct tm_node *ns,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_subport_profile_params *profile0 =
		subport_profile_get(dev, ns);
	struct rte_sched_subport_profile_params profile1;
	uint32_t subport_profile_id;

	if (profile0 == NULL)
		return -1;

	/* Derive new subport profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tb_rate = sp->params.peak.rate;
	profile1.tb_size = sp->params.peak.size;

	/* Since implementation does not allow adding more subport profiles
	 * after port configuration, the subport configuration can be
	 * successfully updated only if the new profile is also part of the
	 * existing set of subport profiles.
	 */
	if (subport_profile_exists(dev, &profile1, &subport_profile_id) == 0)
		return -1;

	/* Update the subport configuration. */
	if (rte_sched_subport_config(SCHED(p), subport_id,
		NULL, subport_profile_id))
		return -1;

	/* Commit changes. */
	ns->shaper_profile->n_users--;

	ns->shaper_profile = sp;
	ns->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	subport_profile_mark(dev, subport_id, subport_profile_id);

	memcpy(&p->soft.tm.params.subport_profile[subport_profile_id],
		&profile1,
		sizeof(profile1));

	return 0;
}
static int
update_pipe_rate(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tb_rate = sp->params.peak.rate;
	profile1.tb_size = sp->params.peak.size;

	/* Since implementation does not allow adding more pipe profiles after
	 * port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	np->shaper_profile->n_users--;
	np->shaper_profile = sp;
	np->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	return 0;
}
static int
update_tc_rate(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tc_rate[tc_id] = sp->params.peak.rate;

	/* Since implementation does not allow adding more pipe profiles after
	 * port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	nt->shaper_profile->n_users--;
	nt->shaper_profile = sp;
	nt->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	return 0;
}
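
/*
 * Common pattern in update_subport_rate(), update_pipe_rate() and
 * update_tc_rate() above: librte_sched does not accept new profiles once
 * the port is configured, so a runtime shaper update can only switch a node
 * to a profile that was already generated at hierarchy commit time; any
 * other rate/size combination is rejected with -1.
 */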
/* Traffic manager node shaper update */
static int
pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct tm_node *n;
	struct tm_shaper_profile *sp;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper profile must be valid. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));
	/* fall-through */
	case TM_NODE_LEVEL_SUBPORT:
		if (update_subport_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	/* fall-through */
	case TM_NODE_LEVEL_PIPE:
		if (update_pipe_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	/* fall-through */
	case TM_NODE_LEVEL_TC:
		if (update_tc_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	/* fall-through */
	case TM_NODE_LEVEL_QUEUE:
	/* fall-through */
	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));
	}
}
static inline uint32_t
tm_port_queue_id(struct rte_eth_dev *dev,
	uint32_t port_subport_id,
	uint32_t subport_pipe_id,
	uint32_t pipe_tc_id,
	uint32_t tc_queue_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

	uint32_t port_pipe_id =
		port_subport_id * n_pipes_per_subport + subport_pipe_id;

	uint32_t port_queue_id =
		port_pipe_id * RTE_SCHED_QUEUES_PER_PIPE + pipe_tc_id + tc_queue_id;

	return port_queue_id;
}
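
/*
 * Worked example with illustrative numbers: for 8 pipes per subport and the
 * usual 16 queues per pipe, subport 1 / pipe 3 / TC 12 / queue 2 yields:
 *	port_pipe_id  = 1 * 8 + 3 = 11
 *	port_queue_id = 11 * 16 + 12 + 2 = 190
 */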
static int
read_port_stats(struct rte_eth_dev *dev,
	struct tm_node *nr,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
	uint32_t subport_id;

	for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
		struct rte_sched_subport_stats s;
		uint32_t tc_ov, id;

		/* Stats read */
		int status = rte_sched_subport_read_stats(SCHED(p),
			subport_id, &s, &tc_ov);
		if (status)
			return status;

		/* Stats accumulate */
		for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
			nr->stats.n_pkts +=
				s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
			nr->stats.n_bytes +=
				s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
			nr->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
				s.n_pkts_tc_dropped[id];
			nr->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
				s.n_bytes_tc_dropped[id];
		}
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &nr->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&nr->stats, 0, sizeof(nr->stats));

	return 0;
}
static int
read_subport_stats(struct rte_eth_dev *dev,
	struct tm_node *ns,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	struct rte_sched_subport_stats s;
	uint32_t tc_ov, tc_id;

	/* Stats read */
	int status = rte_sched_subport_read_stats(SCHED(p),
		subport_id, &s, &tc_ov);
	if (status)
		return status;

	/* Stats accumulate */
	for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
		ns->stats.n_pkts +=
			s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
		ns->stats.n_bytes +=
			s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
		ns->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
			s.n_pkts_tc_dropped[tc_id];
		ns->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
			s.n_bytes_tc_dropped[tc_id];
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &ns->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&ns->stats, 0, sizeof(ns->stats));

	return 0;
}
static int
read_pipe_stats(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;

	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	uint32_t tc_id, queue_id;
	uint32_t i;

	/* Stats read */
	for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
		struct rte_sched_queue_stats s;
		uint16_t qlen;

		if (i < RTE_SCHED_TRAFFIC_CLASS_BE) {
			tc_id = i;
			queue_id = 0;
		} else {
			tc_id = RTE_SCHED_TRAFFIC_CLASS_BE;
			queue_id = i - tc_id;
		}

		uint32_t qid = tm_port_queue_id(dev,
			subport_id, pipe_id, tc_id, queue_id);

		int status = rte_sched_queue_read_stats(SCHED(p),
			qid, &s, &qlen);
		if (status)
			return status;

		/* Stats accumulate */
		np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
		np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
		np->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
		np->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
			s.n_bytes_dropped;
		np->stats.leaf.n_pkts_queued = qlen;
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &np->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&np->stats, 0, sizeof(np->stats));

	return 0;
}
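
/*
 * Queue walk above: indexes i = 0..11 read the single queue of each
 * strict-priority TC (tc_id = i, queue_id = 0), while i = 12..15 read the
 * four best-effort queues (tc_id = RTE_SCHED_TRAFFIC_CLASS_BE,
 * queue_id = i - 12), covering all RTE_SCHED_QUEUES_PER_PIPE queues.
 */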
static int
read_tc_stats(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;

	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	struct rte_sched_queue_stats s;
	uint32_t qid, i;
	uint16_t qlen;
	int status;

	/* Stats read */
	if (tc_id < RTE_SCHED_TRAFFIC_CLASS_BE) {
		qid = tm_port_queue_id(dev,
			subport_id, pipe_id, tc_id, 0);

		status = rte_sched_queue_read_stats(SCHED(p),
			qid, &s, &qlen);
		if (status)
			return status;

		/* Stats accumulate */
		nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
		nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
		nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
			s.n_pkts_dropped;
		nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
			s.n_bytes_dropped;
		nt->stats.leaf.n_pkts_queued = qlen;
	} else {
		for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
			qid = tm_port_queue_id(dev,
				subport_id, pipe_id, tc_id, i);

			status = rte_sched_queue_read_stats(SCHED(p),
				qid, &s, &qlen);
			if (status)
				return status;

			/* Stats accumulate */
			nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
			nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
			nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
				s.n_pkts_dropped;
			nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
				s.n_bytes_dropped;
			nt->stats.leaf.n_pkts_queued = qlen;
		}
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &nt->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&nt->stats, 0, sizeof(nt->stats));

	return 0;
}
static int
read_queue_stats(struct rte_eth_dev *dev,
	struct tm_node *nq,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct rte_sched_queue_stats s;
	uint16_t qlen;

	uint32_t queue_id = tm_node_queue_id(dev, nq);

	struct tm_node *nt = nq->parent_node;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	/* Stats read */
	uint32_t qid = tm_port_queue_id(dev,
		subport_id, pipe_id, tc_id, queue_id);

	int status = rte_sched_queue_read_stats(SCHED(p),
		qid, &s, &qlen);
	if (status)
		return status;

	/* Stats accumulate */
	nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
	nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
	nq->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
	nq->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
		s.n_bytes_dropped;
	nq->stats.leaf.n_pkts_queued = qlen;

	/* Stats copy */
	if (stats)
		memcpy(stats, &nq->stats, sizeof(*stats));
	if (stats_mask)
		*stats_mask = STATS_MASK_QUEUE;

	/* Stats clear */
	if (clear)
		memset(&nq->stats, 0, sizeof(nq->stats));

	return 0;
}
/* Traffic manager read stats counters for specific node */
static int
pmd_tm_node_stats_read(struct rte_eth_dev *dev,
	uint32_t node_id,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear,
	struct rte_tm_error *error)
{
	struct tm_node *n;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		if (read_port_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_SUBPORT:
		if (read_subport_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_PIPE:
		if (read_pipe_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_TC:
		if (read_tc_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_QUEUE:
	default:
		if (read_queue_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	}
}
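
/*
 * Usage sketch (hypothetical application code, generic rte_tm API); the
 * "1" below is the clear-on-read flag:
 *
 *	struct rte_tm_node_stats stats;
 *	uint64_t stats_mask;
 *	struct rte_tm_error error = {0};
 *
 *	if (rte_tm_node_stats_read(port_id, node_id, &stats, &stats_mask,
 *			1, &error) == 0)
 *		printf("pkts=%" PRIu64 "\n", stats.n_pkts);
 */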
const struct rte_tm_ops pmd_tm_ops = {
	.node_type_get = pmd_tm_node_type_get,
	.capabilities_get = pmd_tm_capabilities_get,
	.level_capabilities_get = pmd_tm_level_capabilities_get,
	.node_capabilities_get = pmd_tm_node_capabilities_get,

	.wred_profile_add = pmd_tm_wred_profile_add,
	.wred_profile_delete = pmd_tm_wred_profile_delete,
	.shared_wred_context_add_update = NULL,
	.shared_wred_context_delete = NULL,

	.shaper_profile_add = pmd_tm_shaper_profile_add,
	.shaper_profile_delete = pmd_tm_shaper_profile_delete,
	.shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
	.shared_shaper_delete = pmd_tm_shared_shaper_delete,

	.node_add = pmd_tm_node_add,
	.node_delete = pmd_tm_node_delete,
	.node_suspend = NULL,
	.node_resume = NULL,
	.hierarchy_commit = pmd_tm_hierarchy_commit,

	.node_parent_update = pmd_tm_node_parent_update,
	.node_shaper_update = pmd_tm_node_shaper_update,
	.node_shared_shaper_update = NULL,
	.node_stats_update = NULL,
	.node_wfq_weight_mode_update = NULL,
	.node_cman_update = NULL,
	.node_wred_context_update = NULL,
	.node_shared_wred_context_update = NULL,

	.node_stats_read = pmd_tm_node_stats_read,
};
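
/*
 * Exposure sketch (a minimal illustration, assuming the standard eth_dev TM
 * plumbing): the driver hands this table to the rte_tm layer through its
 * tm_ops_get callback, after which generic calls such as
 * rte_tm_hierarchy_commit() are dispatched to the pmd_tm_* handlers above.
 *
 *	static int
 *	pmd_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
 *	{
 *		*(const struct rte_tm_ops **)arg = &pmd_tm_ops;
 *		return 0;
 *	}
 */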