/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_malloc.h>
#include <rte_string_fns.h>

#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"

#define SUBPORT_TC_PERIOD	10
#define PIPE_TC_PERIOD		40
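
/* Note (informational): SUBPORT_TC_PERIOD and PIPE_TC_PERIOD are the
 * traffic-class rate enforcement periods, in milliseconds, handed to
 * librte_sched through the subport and pipe profiles built further down in
 * subport_profile_build() and pipe_profile_build().
 */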
int
softnic_tmgr_init(struct pmd_internals *p)
{
	TAILQ_INIT(&p->tmgr_port_list);

	return 0;
}
void
softnic_tmgr_free(struct pmd_internals *p)
{
	for ( ; ; ) {
		struct softnic_tmgr_port *tmgr_port;

		tmgr_port = TAILQ_FIRST(&p->tmgr_port_list);
		if (tmgr_port == NULL)
			break;

		TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
		rte_sched_port_free(tmgr_port->s);
		free(tmgr_port);
	}
}
struct softnic_tmgr_port *
softnic_tmgr_port_find(struct pmd_internals *p,
	const char *name)
{
	struct softnic_tmgr_port *tmgr_port;

	if (name == NULL)
		return NULL;

	TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node)
		if (strcmp(tmgr_port->name, name) == 0)
			return tmgr_port;

	return NULL;
}
struct softnic_tmgr_port *
softnic_tmgr_port_create(struct pmd_internals *p,
	const char *name)
{
	struct softnic_tmgr_port *tmgr_port;
	struct tm_params *t = &p->soft.tm.params;
	struct rte_sched_port *sched;
	uint32_t n_subports, subport_id;

	/* Check input params */
	if (name == NULL ||
		softnic_tmgr_port_find(p, name))
		return NULL;

	/* Is hierarchy frozen? */
	if (p->soft.tm.hierarchy_frozen == 0)
		return NULL;

	/* Port */
	sched = rte_sched_port_config(&t->port_params);
	if (sched == NULL)
		return NULL;

	/* Subport */
	n_subports = t->port_params.n_subports_per_port;
	for (subport_id = 0; subport_id < n_subports; subport_id++) {
		uint32_t n_pipes_per_subport =
			t->subport_params[subport_id].n_pipes_per_subport_enabled;
		uint32_t pipe_id;
		int status;

		status = rte_sched_subport_config(sched,
			subport_id,
			&t->subport_params[subport_id],
			t->subport_to_profile[subport_id]);
		if (status) {
			rte_sched_port_free(sched);
			return NULL;
		}

		/* Pipe */
		for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
			int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id;
			int profile_id = t->pipe_to_profile[pos];

			if (profile_id < 0)
				continue;

			status = rte_sched_pipe_config(sched,
				subport_id,
				pipe_id,
				profile_id);
			if (status) {
				rte_sched_port_free(sched);
				return NULL;
			}
		}
	}

	/* Node allocation */
	tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port));
	if (tmgr_port == NULL) {
		rte_sched_port_free(sched);
		return NULL;
	}

	/* Node fill in */
	strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
	tmgr_port->s = sched;

	/* Node add to list */
	TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node);

	return tmgr_port;
}
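
/* Illustrative usage (not part of the driver flow): once the TM hierarchy
 * has been frozen (hierarchy_frozen != 0), a scheduler instance is created
 * and registered under a name, e.g.:
 *
 *	struct softnic_tmgr_port *tmgr_port;
 *
 *	tmgr_port = softnic_tmgr_port_create(p, "TMGR");
 *	if (tmgr_port == NULL)
 *		return -EINVAL;
 *
 * The "TMGR" instance is the one looked up by the SCHED() helper below.
 */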
static struct rte_sched_port *
SCHED(struct pmd_internals *p)
{
	struct softnic_tmgr_port *tmgr_port;

	tmgr_port = softnic_tmgr_port_find(p, "TMGR");
	if (tmgr_port == NULL)
		return NULL;

	return tmgr_port->s;
}
static void
tm_hierarchy_init(struct pmd_internals *p)
{
	memset(&p->soft.tm, 0, sizeof(p->soft.tm));

	/* Initialize shaper profile list */
	TAILQ_INIT(&p->soft.tm.h.shaper_profiles);

	/* Initialize shared shaper list */
	TAILQ_INIT(&p->soft.tm.h.shared_shapers);

	/* Initialize WRED profile list */
	TAILQ_INIT(&p->soft.tm.h.wred_profiles);

	/* Initialize TM node list */
	TAILQ_INIT(&p->soft.tm.h.nodes);
}
static void
tm_hierarchy_free(struct pmd_internals *p)
{
	/* Remove all nodes */
	for ( ; ; ) {
		struct tm_node *tm_node;

		tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
		if (tm_node == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
		free(tm_node);
	}

	/* Remove all WRED profiles */
	for ( ; ; ) {
		struct tm_wred_profile *wred_profile;

		wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
		if (wred_profile == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
		free(wred_profile);
	}

	/* Remove all shared shapers */
	for ( ; ; ) {
		struct tm_shared_shaper *shared_shaper;

		shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
		if (shared_shaper == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
		free(shared_shaper);
	}

	/* Remove all shaper profiles */
	for ( ; ; ) {
		struct tm_shaper_profile *shaper_profile;

		shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
		if (shaper_profile == NULL)
			break;

		TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
			shaper_profile, node);
		free(shaper_profile);
	}

	tm_hierarchy_init(p);
}
static struct tm_shaper_profile *
tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
	struct tm_shaper_profile *sp;

	TAILQ_FOREACH(sp, spl, node)
		if (shaper_profile_id == sp->shaper_profile_id)
			return sp;

	return NULL;
}
static struct tm_shared_shaper *
tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
	struct tm_shared_shaper *ss;

	TAILQ_FOREACH(ss, ssl, node)
		if (shared_shaper_id == ss->shared_shaper_id)
			return ss;

	return NULL;
}
static struct tm_wred_profile *
tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
	struct tm_wred_profile *wp;

	TAILQ_FOREACH(wp, wpl, node)
		if (wred_profile_id == wp->wred_profile_id)
			return wp;

	return NULL;
}
static struct tm_node *
tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node)
		if (n->node_id == node_id)
			return n;

	return NULL;
}
static struct tm_node *
tm_root_node_present(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node)
		if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
			return n;

	return NULL;
}
static uint32_t
tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *ns;
	uint32_t subport_id;

	subport_id = 0;
	TAILQ_FOREACH(ns, nl, node) {
		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		if (ns->node_id == subport_node->node_id)
			return subport_id;

		subport_id++;
	}

	return UINT32_MAX;
}
static uint32_t
tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *np;
	uint32_t pipe_id;

	pipe_id = 0;
	TAILQ_FOREACH(np, nl, node) {
		if (np->level != TM_NODE_LEVEL_PIPE ||
			np->parent_node_id != pipe_node->parent_node_id)
			continue;

		if (np->node_id == pipe_node->node_id)
			return pipe_id;

		pipe_id++;
	}

	return UINT32_MAX;
}
static uint32_t
tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
{
	return tc_node->priority;
}
static uint32_t
tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *nq;
	uint32_t queue_id;

	queue_id = 0;
	TAILQ_FOREACH(nq, nl, node) {
		if (nq->level != TM_NODE_LEVEL_QUEUE ||
			nq->parent_node_id != queue_node->parent_node_id)
			continue;

		if (nq->node_id == queue_node->node_id)
			return queue_id;

		queue_id++;
	}

	return UINT32_MAX;
}
static uint32_t
tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t n_queues_max = p->params.tm.n_queues;
	uint32_t n_tc_max =
		(n_queues_max * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
		/ RTE_SCHED_QUEUES_PER_PIPE;
	uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
	uint32_t n_subports_max = n_pipes_max;
	uint32_t n_root_max = 1;

	switch (level) {
	case TM_NODE_LEVEL_PORT:
		return n_root_max;
	case TM_NODE_LEVEL_SUBPORT:
		return n_subports_max;
	case TM_NODE_LEVEL_PIPE:
		return n_pipes_max;
	case TM_NODE_LEVEL_TC:
		return n_tc_max;
	case TM_NODE_LEVEL_QUEUE:
	default:
		return n_queues_max;
	}
}
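
/* Worked example (illustrative figures): with RTE_SCHED_QUEUES_PER_PIPE = 16,
 * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE = 13 and n_queues_max = 64, the
 * arithmetic above yields n_tc_max = 64 * 13 / 16 = 52, hence
 * n_pipes_max = 52 / 13 = 4 and n_subports_max = 4.
 */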
/* Traffic manager node type get */
static int
pmd_tm_node_type_get(struct rte_eth_dev *dev,
	uint32_t node_id,
	int *is_leaf,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	if (is_leaf == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	if (node_id == RTE_TM_NODE_ID_NULL ||
		(tm_node_search(dev, node_id) == NULL))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	*is_leaf = node_id < p->params.tm.n_queues;

	return 0;
}
#ifdef RTE_SCHED_RED
#define WRED_SUPPORTED						1
#else
#define WRED_SUPPORTED						0
#endif
#define STATS_MASK_DEFAULT					\
	(RTE_TM_STATS_N_PKTS |					\
	RTE_TM_STATS_N_BYTES |					\
	RTE_TM_STATS_N_PKTS_GREEN_DROPPED |			\
	RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

#define STATS_MASK_QUEUE					\
	(STATS_MASK_DEFAULT |					\
	RTE_TM_STATS_N_PKTS_QUEUED)
static const struct rte_tm_capabilities tm_cap = {
	.n_nodes_max = UINT32_MAX,
	.n_levels_max = TM_NODE_LEVEL_MAX,

	.non_leaf_nodes_identical = 0,
	.leaf_nodes_identical = 1,

	.shaper_n_max = UINT32_MAX,
	.shaper_private_n_max = UINT32_MAX,
	.shaper_private_dual_rate_n_max = 0,
	.shaper_private_rate_min = 1,
	.shaper_private_rate_max = UINT32_MAX,
	.shaper_private_packet_mode_supported = 0,
	.shaper_private_byte_mode_supported = 1,

	.shaper_shared_n_max = UINT32_MAX,
	.shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
	.shaper_shared_n_shapers_per_node_max = 1,
	.shaper_shared_dual_rate_n_max = 0,
	.shaper_shared_rate_min = 1,
	.shaper_shared_rate_max = UINT32_MAX,
	.shaper_shared_packet_mode_supported = 0,
	.shaper_shared_byte_mode_supported = 1,

	.shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
	.shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,

	.sched_n_children_max = UINT32_MAX,
	.sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
	.sched_wfq_n_children_per_group_max = UINT32_MAX,
	.sched_wfq_n_groups_max = 1,
	.sched_wfq_weight_max = UINT32_MAX,
	.sched_wfq_packet_mode_supported = 0,
	.sched_wfq_byte_mode_supported = 1,

	.cman_wred_packet_mode_supported = WRED_SUPPORTED,
	.cman_wred_byte_mode_supported = 0,
	.cman_head_drop_supported = 0,
	.cman_wred_context_n_max = 0,
	.cman_wred_context_private_n_max = 0,
	.cman_wred_context_shared_n_max = 0,
	.cman_wred_context_shared_n_nodes_per_context_max = 0,
	.cman_wred_context_shared_n_contexts_per_node_max = 0,

	.mark_vlan_dei_supported = {0, 0, 0},
	.mark_ip_ecn_tcp_supported = {0, 0, 0},
	.mark_ip_ecn_sctp_supported = {0, 0, 0},
	.mark_ip_dscp_supported = {0, 0, 0},

	.dynamic_update_mask = 0,

	.stats_mask = STATS_MASK_QUEUE,
};
/* Traffic manager capabilities get */
static int
pmd_tm_capabilities_get(struct rte_eth_dev *dev,
	struct rte_tm_capabilities *cap,
	struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_cap, sizeof(*cap));

	cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

	cap->shaper_private_n_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);

	cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);

	cap->shaper_n_max = cap->shaper_private_n_max +
		cap->shaper_shared_n_max;

	cap->shaper_shared_n_nodes_per_shaper_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);

	cap->sched_n_children_max = RTE_MAX(
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
		(uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);

	cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;

	if (WRED_SUPPORTED)
		cap->cman_wred_context_private_n_max =
			tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

	cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
		cap->cman_wred_context_shared_n_max;

	return 0;
}
static const struct rte_tm_level_capabilities tm_level_cap[] = {
	[TM_NODE_LEVEL_PORT] = {
		.n_nodes_max = 1,
		.n_nodes_nonleaf_max = 1,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_private_packet_mode_supported = 0,
			.shaper_private_byte_mode_supported = 1,
			.shaper_shared_n_max = 0,
			.shaper_shared_packet_mode_supported = 0,
			.shaper_shared_byte_mode_supported = 0,

			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = 1,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_SUBPORT] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_private_packet_mode_supported = 0,
			.shaper_private_byte_mode_supported = 1,
			.shaper_shared_n_max = 0,
			.shaper_shared_packet_mode_supported = 0,
			.shaper_shared_byte_mode_supported = 0,

			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
#ifdef RTE_SCHED_SUBPORT_TC_OV
			.sched_wfq_weight_max = UINT32_MAX,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,
#else
			.sched_wfq_weight_max = 1,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,
#endif

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_PIPE] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_private_packet_mode_supported = 0,
			.shaper_private_byte_mode_supported = 1,
			.shaper_shared_n_max = 0,
			.shaper_shared_packet_mode_supported = 0,
			.shaper_shared_byte_mode_supported = 0,

			.sched_n_children_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_sp_n_priorities_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_wfq_n_children_per_group_max = 1,
			.sched_wfq_n_groups_max = 0,
			.sched_wfq_weight_max = 1,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 0,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_TC] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		{.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_private_packet_mode_supported = 0,
			.shaper_private_byte_mode_supported = 1,
			.shaper_shared_n_max = 1,
			.shaper_shared_packet_mode_supported = 0,
			.shaper_shared_byte_mode_supported = 1,

			.sched_n_children_max =
				RTE_SCHED_BE_QUEUES_PER_PIPE,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max =
				RTE_SCHED_BE_QUEUES_PER_PIPE,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,

			.stats_mask = STATS_MASK_DEFAULT,
		} },
	},

	[TM_NODE_LEVEL_QUEUE] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = 0,
		.n_nodes_leaf_max = UINT32_MAX,
		.non_leaf_nodes_identical = 0,
		.leaf_nodes_identical = 1,

		{.leaf = {
			.shaper_private_supported = 0,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 0,
			.shaper_private_rate_max = 0,
			.shaper_private_packet_mode_supported = 0,
			.shaper_private_byte_mode_supported = 0,
			.shaper_shared_n_max = 0,
			.shaper_shared_packet_mode_supported = 0,
			.shaper_shared_byte_mode_supported = 0,

			.cman_head_drop_supported = 0,
			.cman_wred_packet_mode_supported = WRED_SUPPORTED,
			.cman_wred_byte_mode_supported = 0,
			.cman_wred_context_private_supported = WRED_SUPPORTED,
			.cman_wred_context_shared_n_max = 0,

			.stats_mask = STATS_MASK_QUEUE,
		} },
	},
};
/* Traffic manager level capabilities get */
static int
pmd_tm_level_capabilities_get(struct rte_eth_dev *dev,
	uint32_t level_id,
	struct rte_tm_level_capabilities *cap,
	struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	if (level_id >= TM_NODE_LEVEL_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));

	switch (level_id) {
	case TM_NODE_LEVEL_PORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_SUBPORT);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_SUBPORT);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_PIPE);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_PIPE:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_PIPE);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		break;

	case TM_NODE_LEVEL_TC:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_TC);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		break;

	case TM_NODE_LEVEL_QUEUE:
	default:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_QUEUE);
		cap->n_nodes_leaf_max = cap->n_nodes_max;
		break;
	}

	return 0;
}
static const struct rte_tm_node_capabilities tm_node_cap[] = {
	[TM_NODE_LEVEL_PORT] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_private_packet_mode_supported = 0,
		.shaper_private_byte_mode_supported = 1,
		.shaper_shared_n_max = 0,
		.shaper_shared_packet_mode_supported = 0,
		.shaper_shared_byte_mode_supported = 0,

		{.nonleaf = {
			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = 1,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_SUBPORT] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_private_packet_mode_supported = 0,
		.shaper_private_byte_mode_supported = 1,
		.shaper_shared_n_max = 0,
		.shaper_shared_packet_mode_supported = 0,
		.shaper_shared_byte_mode_supported = 0,

		{.nonleaf = {
			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_PIPE] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_private_packet_mode_supported = 0,
		.shaper_private_byte_mode_supported = 1,
		.shaper_shared_n_max = 0,
		.shaper_shared_packet_mode_supported = 0,
		.shaper_shared_byte_mode_supported = 0,

		{.nonleaf = {
			.sched_n_children_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_sp_n_priorities_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_wfq_n_children_per_group_max = 1,
			.sched_wfq_n_groups_max = 0,
			.sched_wfq_weight_max = 1,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 0,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_TC] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_private_packet_mode_supported = 0,
		.shaper_private_byte_mode_supported = 1,
		.shaper_shared_n_max = 1,
		.shaper_shared_packet_mode_supported = 0,
		.shaper_shared_byte_mode_supported = 1,

		{.nonleaf = {
			.sched_n_children_max =
				RTE_SCHED_BE_QUEUES_PER_PIPE,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max =
				RTE_SCHED_BE_QUEUES_PER_PIPE,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
			.sched_wfq_packet_mode_supported = 0,
			.sched_wfq_byte_mode_supported = 1,
		} },

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_QUEUE] = {
		.shaper_private_supported = 0,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 0,
		.shaper_private_rate_max = 0,
		.shaper_private_packet_mode_supported = 0,
		.shaper_private_byte_mode_supported = 0,
		.shaper_shared_n_max = 0,
		.shaper_shared_packet_mode_supported = 0,
		.shaper_shared_byte_mode_supported = 0,

		{.leaf = {
			.cman_head_drop_supported = 0,
			.cman_wred_packet_mode_supported = WRED_SUPPORTED,
			.cman_wred_byte_mode_supported = 0,
			.cman_wred_context_private_supported = WRED_SUPPORTED,
			.cman_wred_context_shared_n_max = 0,
		} },

		.stats_mask = STATS_MASK_QUEUE,
	},
};
/* Traffic manager node capabilities get */
static int
pmd_tm_node_capabilities_get(struct rte_eth_dev *dev,
	uint32_t node_id,
	struct rte_tm_node_capabilities *cap,
	struct rte_tm_error *error)
{
	struct tm_node *tm_node;

	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	tm_node = tm_node_search(dev, node_id);
	if (tm_node == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));

	switch (tm_node->level) {
	case TM_NODE_LEVEL_PORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_SUBPORT);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_PIPE);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_PIPE:
	case TM_NODE_LEVEL_TC:
	case TM_NODE_LEVEL_QUEUE:
	default:
		break;
	}

	return 0;
}
static int
shaper_profile_check(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct tm_shaper_profile *sp;

	/* Shaper profile ID must not be NONE. */
	if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper profile must not exist. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak rate: non-zero, 32-bit */
	if (profile->peak.rate == 0 ||
		profile->peak.rate >= UINT32_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak size: non-zero, 32-bit */
	if (profile->peak.size == 0 ||
		profile->peak.size >= UINT32_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
			NULL,
			rte_strerror(EINVAL));

	/* Dual-rate profiles are not supported. */
	if (profile->committed.rate != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Packet length adjust: 24 bytes */
	if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
			NULL,
			rte_strerror(EINVAL));

	/* Packet mode is not supported. */
	if (profile->packet_mode != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
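
/* Example of a profile that passes the checks above (illustrative values):
 * single rate, byte mode, with the fixed 24-byte framing adjustment:
 *
 *	struct rte_tm_shaper_params sp = {
 *		.peak = {.rate = 12500000, .size = 1000000},
 *		.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
 *	};
 *
 * Leaving all other fields zero keeps the committed rate and packet mode
 * disabled, as required.
 */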
/* Traffic manager shaper profile add */
static int
pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
	struct tm_shaper_profile *sp;
	int status;

	/* Check input params */
	status = shaper_profile_check(dev, shaper_profile_id, profile, error);
	if (status)
		return status;

	/* Memory allocation */
	sp = calloc(1, sizeof(struct tm_shaper_profile));
	if (sp == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	sp->shaper_profile_id = shaper_profile_id;
	memcpy(&sp->params, profile, sizeof(sp->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(spl, sp, node);
	p->soft.tm.h.n_shaper_profiles++;

	return 0;
}
/* Traffic manager shaper profile delete */
static int
pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile *sp;

	/* Check existing */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (sp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
	p->soft.tm.h.n_shaper_profiles--;
	free(sp);

	return 0;
}
static struct tm_node *
tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
	struct tm_shared_shaper *ss)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	/* Subport: each TC uses shared shaper */
	TAILQ_FOREACH(n, nl, node) {
		if (n->level != TM_NODE_LEVEL_TC ||
			n->params.n_shared_shapers == 0 ||
			n->params.shared_shaper_id[0] != ss->shared_shaper_id)
			continue;

		return n;
	}

	return NULL;
}
static int
subport_profile_exists(struct rte_eth_dev *dev,
	struct rte_sched_subport_profile_params *sp,
	uint32_t *subport_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;
	uint32_t i;

	for (i = 0; i < t->n_subport_profiles; i++)
		if (memcmp(&t->subport_profile[i], sp, sizeof(*sp)) == 0) {
			if (subport_profile_id)
				*subport_profile_id = i;
			return 1;
		}

	return 0;
}
static int
update_subport_tc_rate(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct tm_shared_shaper *ss,
	struct tm_shaper_profile *sp_new)
{
	struct rte_sched_subport_profile_params subport_profile;
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);
	struct tm_node *np = nt->parent_node;
	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	struct tm_params *t = &p->soft.tm.params;
	uint32_t subport_profile_id = t->subport_to_profile[subport_id];
	struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
		ss->shaper_profile_id);

	/* Derive new subport configuration. */
	memcpy(&subport_profile,
		&p->soft.tm.params.subport_profile[subport_profile_id],
		sizeof(subport_profile));
	subport_profile.tc_rate[tc_id] = sp_new->params.peak.rate;

	/* Update the subport configuration. */
	if (rte_sched_subport_config(SCHED(p),
		subport_id, NULL, subport_profile_id))
		return -1;

	/* Commit changes. */
	sp_old->n_users--;

	ss->shaper_profile_id = sp_new->shaper_profile_id;
	sp_new->n_users++;

	memcpy(&p->soft.tm.params.subport_profile[subport_profile_id],
		&subport_profile,
		sizeof(subport_profile));

	return 0;
}
/* Traffic manager shared shaper add/update */
static int
pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
	uint32_t shared_shaper_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper *ss;
	struct tm_shaper_profile *sp;
	struct tm_node *nt;

	/* Shaper profile must be valid. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/**
	 * Add new shared shaper
	 */
	ss = tm_shared_shaper_search(dev, shared_shaper_id);
	if (ss == NULL) {
		struct tm_shared_shaper_list *ssl =
			&p->soft.tm.h.shared_shapers;

		/* Hierarchy must not be frozen */
		if (p->soft.tm.hierarchy_frozen)
			return -rte_tm_error_set(error,
				EBUSY,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EBUSY));

		/* Memory allocation */
		ss = calloc(1, sizeof(struct tm_shared_shaper));
		if (ss == NULL)
			return -rte_tm_error_set(error,
				ENOMEM,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(ENOMEM));

		/* Fill in */
		ss->shared_shaper_id = shared_shaper_id;
		ss->shaper_profile_id = shaper_profile_id;

		/* Add to list */
		TAILQ_INSERT_TAIL(ssl, ss, node);
		p->soft.tm.h.n_shared_shapers++;

		return 0;
	}

	/**
	 * Update existing shared shaper
	 */
	/* Hierarchy must be frozen (run-time update) */
	if (p->soft.tm.hierarchy_frozen == 0)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Propagate change. */
	nt = tm_shared_shaper_get_tc(dev, ss);
	if (update_subport_tc_rate(dev, nt, ss, sp))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
/* Traffic manager shared shaper delete */
static int
pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
	uint32_t shared_shaper_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper *ss;

	/* Check existing */
	ss = tm_shared_shaper_search(dev, shared_shaper_id);
	if (ss == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (ss->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
	p->soft.tm.h.n_shared_shapers--;
	free(ss);

	return 0;
}
static int
wred_profile_check(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct tm_wred_profile *wp;
	enum rte_color color;

	/* WRED profile ID must not be NONE. */
	if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* WRED profile must not exist. */
	wp = tm_wred_profile_search(dev, wred_profile_id);
	if (wp)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* WRED profile must be in packet mode */
	if (profile->packet_mode == 0)
		return -rte_tm_error_set(error,
			ENOTSUP,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(ENOTSUP));

	/* min_th <= max_th, max_th > 0 */
	for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
		uint32_t min_th = profile->red_params[color].min_th;
		uint32_t max_th = profile->red_params[color].max_th;

		if (min_th > max_th ||
			max_th == 0 ||
			min_th > UINT16_MAX ||
			max_th > UINT16_MAX)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_WRED_PROFILE,
				NULL,
				rte_strerror(EINVAL));
	}

	return 0;
}
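
/* Example of a WRED profile accepted above (illustrative thresholds): packet
 * mode, with per-color min/max thresholds that fit into 16 bits:
 *
 *	struct rte_tm_wred_params wp = {
 *		.red_params = {
 *			[RTE_COLOR_GREEN] = {.min_th = 48, .max_th = 64,
 *				.maxp_inv = 10, .wq_log2 = 9},
 *		},
 *		.packet_mode = 1,
 *	};
 *
 * The remaining colors must also satisfy min_th <= max_th and max_th > 0.
 */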
/* Traffic manager WRED profile add */
static int
pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
	struct tm_wred_profile *wp;
	int status;

	/* Check input params */
	status = wred_profile_check(dev, wred_profile_id, profile, error);
	if (status)
		return status;

	/* Memory allocation */
	wp = calloc(1, sizeof(struct tm_wred_profile));
	if (wp == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	wp->wred_profile_id = wred_profile_id;
	memcpy(&wp->params, profile, sizeof(wp->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(wpl, wp, node);
	p->soft.tm.h.n_wred_profiles++;

	return 0;
}
/* Traffic manager WRED profile delete */
static int
pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile *wp;

	/* Check existing */
	wp = tm_wred_profile_search(dev, wred_profile_id);
	if (wp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (wp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
	p->soft.tm.h.n_wred_profiles--;
	free(wp);

	return 0;
}
static int
node_add_check_port(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
		params->shaper_profile_id);

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_subport(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_pipe(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight __rte_unused,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be equal to the number of TCs */
	if (params->nonleaf.n_sp_priorities !=
		RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* WFQ mode must be byte mode */
	if (params->nonleaf.wfq_weight_mode != NULL &&
		params->nonleaf.wfq_weight_mode[0] != 0 &&
		params->nonleaf.wfq_weight_mode[1] != 0 &&
		params->nonleaf.wfq_weight_mode[2] != 0 &&
		params->nonleaf.wfq_weight_mode[3] != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_tc(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority __rte_unused,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Single valid shared shaper */
	if (params->n_shared_shapers > 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	if (params->n_shared_shapers == 1 &&
		(params->shared_shaper_id == NULL ||
		(!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Number of priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_queue(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight __rte_unused,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: leaf */
	if (node_id >= p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* No private shaper */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Congestion management must not be head drop */
	if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
			NULL,
			rte_strerror(EINVAL));

	/* Congestion management set to WRED */
	if (params->leaf.cman == RTE_TM_CMAN_WRED) {
		uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
		struct tm_wred_profile *wp = tm_wred_profile_search(dev,
			wred_profile_id);

		/* WRED profile (for private WRED context) must be valid */
		if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
			wp == NULL)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
				NULL,
				rte_strerror(EINVAL));

		/* No shared WRED contexts */
		if (params->leaf.wred.n_shared_wred_contexts != 0)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_QUEUE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct tm_node *pn;
	uint32_t level;
	int status;

	/* node_id, parent_node_id:
	 *    -node_id must not be RTE_TM_NODE_ID_NULL
	 *    -node_id must not be in use
	 *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
	 *        -root node must not exist
	 *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
	 *        -parent_node_id must be valid
	 */
	if (node_id == RTE_TM_NODE_ID_NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	if (tm_node_search(dev, node_id))
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EEXIST));

	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		pn = NULL;
		if (tm_root_node_present(dev))
			return -rte_tm_error_set(error,
				EEXIST,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EEXIST));
	} else {
		pn = tm_node_search(dev, parent_node_id);
		if (pn == NULL)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EINVAL));
	}

	/* priority: must be 0 .. (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1) */
	if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* weight: must be 1 .. 255 */
	if (weight == 0 || weight >= UINT8_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* level_id: if valid, then
	 *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
	 *        -level_id must be zero
	 *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
	 *        -level_id must be parent level ID plus one
	 */
	level = (pn == NULL) ? 0 : pn->level + 1;
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* params: must not be NULL */
	if (params == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS,
			NULL,
			rte_strerror(EINVAL));

	/* params: per level checks */
	switch (level) {
	case TM_NODE_LEVEL_PORT:
		status = node_add_check_port(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		status = node_add_check_subport(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_PIPE:
		status = node_add_check_pipe(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_TC:
		status = node_add_check_tc(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_QUEUE:
		status = node_add_check_queue(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));
	}

	return 0;
}
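
/* The level of the node being added is inferred from its parent: a root node
 * (parent_node_id == RTE_TM_NODE_ID_NULL) sits at level 0 (port), and every
 * other node sits one level below its parent. For example, a node whose
 * parent is a subport (level 1) is validated with the pipe (level 2) checks.
 */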
/* Traffic manager node add */
static int
pmd_tm_node_add(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;
	uint32_t i;
	int status;

	/* Checks */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	status = node_add_check(dev, node_id, parent_node_id, priority, weight,
		level_id, params, error);
	if (status)
		return status;

	/* Memory allocation */
	n = calloc(1, sizeof(struct tm_node));
	if (n == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	n->node_id = node_id;
	n->parent_node_id = parent_node_id;
	n->priority = priority;
	n->weight = weight;

	if (parent_node_id != RTE_TM_NODE_ID_NULL) {
		n->parent_node = tm_node_search(dev, parent_node_id);
		n->level = n->parent_node->level + 1;
	}

	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
		n->shaper_profile = tm_shaper_profile_search(dev,
			params->shaper_profile_id);

	if (n->level == TM_NODE_LEVEL_QUEUE &&
		params->leaf.cman == RTE_TM_CMAN_WRED)
		n->wred_profile = tm_wred_profile_search(dev,
			params->leaf.wred.wred_profile_id);

	memcpy(&n->params, params, sizeof(n->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(nl, n, node);
	p->soft.tm.h.n_nodes++;

	/* Update dependencies */
	if (n->parent_node)
		n->parent_node->n_children++;

	if (n->shaper_profile)
		n->shaper_profile->n_users++;

	for (i = 0; i < params->n_shared_shapers; i++) {
		struct tm_shared_shaper *ss;

		ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
		ss->n_users++;
	}

	if (n->wred_profile)
		n->wred_profile->n_users++;

	p->soft.tm.h.n_tm_nodes[n->level]++;

	return 0;
}
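
/* Illustrative top-down construction through the rte_tm API (hypothetical
 * node IDs; error handling omitted). Nodes must be added parent-first, so a
 * minimal hierarchy is built as port, then subport, pipe, TC and queue:
 *
 *	rte_tm_node_add(port_id, 1000, RTE_TM_NODE_ID_NULL, 0, 1,
 *		RTE_TM_NODE_LEVEL_ID_ANY, &port_params, &error);
 *	rte_tm_node_add(port_id, 900, 1000, 0, 1,
 *		RTE_TM_NODE_LEVEL_ID_ANY, &subport_params, &error);
 *	...
 */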
/* Traffic manager node delete */
static int
pmd_tm_node_delete(struct rte_eth_dev *dev,
	uint32_t node_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node *n;
	uint32_t i;

	/* Check hierarchy changes are currently allowed */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Check existing */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (n->n_children)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Update dependencies */
	p->soft.tm.h.n_tm_nodes[n->level]--;

	if (n->wred_profile)
		n->wred_profile->n_users--;

	for (i = 0; i < n->params.n_shared_shapers; i++) {
		struct tm_shared_shaper *ss;

		ss = tm_shared_shaper_search(dev,
			n->params.shared_shaper_id[i]);
		ss->n_users--;
	}

	if (n->shaper_profile)
		n->shaper_profile->n_users--;

	if (n->parent_node)
		n->parent_node->n_children--;

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
	p->soft.tm.h.n_nodes--;
	free(n);

	return 0;
}
static void
pipe_profile_build(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct rte_sched_pipe_params *pp)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_node *nt, *nq;

	memset(pp, 0, sizeof(*pp));

	/* Pipe */
	pp->tb_rate = np->shaper_profile->params.peak.rate;
	pp->tb_size = np->shaper_profile->params.peak.size;

	/* Traffic Class (TC) */
	pp->tc_period = PIPE_TC_PERIOD;

	pp->tc_ov_weight = np->weight;

	TAILQ_FOREACH(nt, nl, node) {
		uint32_t queue_id = 0;

		if (nt->level != TM_NODE_LEVEL_TC ||
			nt->parent_node_id != np->node_id)
			continue;

		pp->tc_rate[nt->priority] =
			nt->shaper_profile->params.peak.rate;

		/* Queue */
		TAILQ_FOREACH(nq, nl, node) {

			if (nq->level != TM_NODE_LEVEL_QUEUE ||
				nq->parent_node_id != nt->node_id)
				continue;

			if (nt->priority == RTE_SCHED_TRAFFIC_CLASS_BE)
				pp->wrr_weights[queue_id] = nq->weight;

			queue_id++;
		}
	}
}
static int
pipe_profile_free_exists(struct rte_eth_dev *dev,
	uint32_t *pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;

	if (t->n_pipe_profiles < TM_MAX_PIPE_PROFILE) {
		*pipe_profile_id = t->n_pipe_profiles;
		return 1;
	}

	return 0;
}
static int
pipe_profile_exists(struct rte_eth_dev *dev,
	struct rte_sched_pipe_params *pp,
	uint32_t *pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;
	uint32_t i;

	for (i = 0; i < t->n_pipe_profiles; i++)
		if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
			if (pipe_profile_id)
				*pipe_profile_id = i;
			return 1;
		}

	return 0;
}
static void
pipe_profile_install(struct rte_eth_dev *dev,
	struct rte_sched_pipe_params *pp,
	uint32_t pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;

	memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
	t->n_pipe_profiles++;
}
static void
pipe_profile_mark(struct rte_eth_dev *dev,
	uint32_t subport_id,
	uint32_t pipe_id,
	uint32_t pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_params *t = &p->soft.tm.params;
	uint32_t n_pipes_per_subport, pos;

	n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
	pos = subport_id * n_pipes_per_subport + pipe_id;

	t->pipe_to_profile[pos] = pipe_profile_id;
}
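
/* Worked example (illustrative): with 8 pipes per subport, pipe 3 of
 * subport 1 maps to pos = 1 * 8 + 3 = 11, so t->pipe_to_profile[11] records
 * the pipe profile used by that pipe.
 */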
static struct rte_sched_pipe_params *
pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_params *t = &p->soft.tm.params;
	uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

	uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
	uint32_t pipe_profile_id = t->pipe_to_profile[pos];

	return &t->pipe_profiles[pipe_profile_id];
}
static int
pipe_profiles_generate(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_node *ns, *np;
	uint32_t subport_id;

	/* Objective: Fill in the following fields in struct tm_params:
	 *    - pipe_profiles
	 *    - n_pipe_profiles
	 *    - pipe_to_profile
	 */

	subport_id = 0;
	TAILQ_FOREACH(ns, nl, node) {
		uint32_t pipe_id;

		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		pipe_id = 0;
		TAILQ_FOREACH(np, nl, node) {
			struct rte_sched_pipe_params pp;
			uint32_t pos;

			memset(&pp, 0, sizeof(pp));

			if (np->level != TM_NODE_LEVEL_PIPE ||
				np->parent_node_id != ns->node_id)
				continue;

			pipe_profile_build(dev, np, &pp);

			if (!pipe_profile_exists(dev, &pp, &pos)) {
				if (!pipe_profile_free_exists(dev, &pos))
					return -1;

				pipe_profile_install(dev, &pp, pos);
			}

			pipe_profile_mark(dev, subport_id, pipe_id, pos);

			pipe_id++;
		}

		subport_id++;
	}

	return 0;
}
static struct tm_wred_profile *
tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_node *nq;

	TAILQ_FOREACH(nq, nl, node) {
		if (nq->level != TM_NODE_LEVEL_QUEUE ||
			nq->parent_node->priority != tc_id)
			continue;

		return nq->wred_profile;
	}

	return NULL;
}
#ifdef RTE_SCHED_RED

static void
wred_profiles_set(struct rte_eth_dev *dev, uint32_t subport_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct rte_sched_subport_params *pp =
		&p->soft.tm.params.subport_params[subport_id];

	uint32_t tc_id;
	enum rte_color color;

	for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
		for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
			struct rte_red_params *dst =
				&pp->red_params[tc_id][color];
			struct tm_wred_profile *src_wp =
				tm_tc_wred_profile_get(dev, tc_id);
			struct rte_tm_red_params *src =
				&src_wp->params.red_params[color];

			memcpy(dst, src, sizeof(*dst));
		}
}

#else

#define wred_profiles_set(dev, subport_id)

#endif
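
/* When RTE_SCHED_RED is not enabled, wred_profiles_set() compiles away to
 * nothing, so callers proceed without touching the subport red_params.
 */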
static struct tm_shared_shaper *
tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
{
	return (tc_node->params.n_shared_shapers) ?
		tm_shared_shaper_search(dev,
			tc_node->params.shared_shaper_id[0]) :
		NULL;
}
static struct tm_shared_shaper *
tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
	struct tm_node *subport_node,
	uint32_t tc_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node) {
		if (n->level != TM_NODE_LEVEL_TC ||
			n->parent_node->parent_node_id !=
				subport_node->node_id ||
			n->priority != tc_id)
			continue;

		return tm_tc_shared_shaper_get(dev, n);
	}

	return NULL;
}
static struct rte_sched_subport_profile_params *
subport_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;
	uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);

	return &t->subport_profile[subport_id];
}
static void
subport_profile_mark(struct rte_eth_dev *dev,
	uint32_t subport_id,
	uint32_t subport_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;

	t->subport_to_profile[subport_id] = subport_profile_id;
}
static void
subport_profile_install(struct rte_eth_dev *dev,
	struct rte_sched_subport_profile_params *sp,
	uint32_t subport_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;

	memcpy(&t->subport_profile[subport_profile_id],
		sp, sizeof(*sp));
	t->n_subport_profiles++;
}
static int
subport_profile_free_exists(struct rte_eth_dev *dev,
	uint32_t *subport_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;

	if (t->n_subport_profiles < TM_MAX_SUBPORT_PROFILE) {
		*subport_profile_id = t->n_subport_profiles;
		return 1;
	}

	return 0;
}
static void
subport_profile_build(struct rte_eth_dev *dev, struct tm_node *np,
	struct rte_sched_subport_profile_params *sp)
{
	uint32_t i;

	memset(sp, 0, sizeof(*sp));

	sp->tb_rate = np->shaper_profile->params.peak.rate;
	sp->tb_size = np->shaper_profile->params.peak.size;

	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
		struct tm_shared_shaper *ss;
		struct tm_shaper_profile *ssp;

		ss = tm_subport_tc_shared_shaper_get(dev, np, i);
		ssp = (ss) ? tm_shaper_profile_search(dev,
			ss->shaper_profile_id) :
			np->shaper_profile;
		sp->tc_rate[i] = ssp->params.peak.rate;
	}

	/* Traffic Class (TC) */
	sp->tc_period = SUBPORT_TC_PERIOD;
}
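
/* Note: for every TC that has no shared shaper, the subport-level TC rate
 * above falls back to the subport's own (private) shaper rate, i.e. that TC
 * is effectively capped only by the subport token bucket.
 */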
static int
subport_profiles_generate(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_node *ns;
	uint32_t subport_id;

	/* Objective: Fill in the following fields in struct tm_params:
	 *    - subport_profiles
	 *    - n_subport_profiles
	 *    - subport_to_profile
	 */

	subport_id = 0;
	TAILQ_FOREACH(ns, nl, node) {
		struct rte_sched_subport_profile_params sp;
		uint32_t pos;

		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		memset(&sp, 0, sizeof(sp));

		subport_profile_build(dev, ns, &sp);

		if (!subport_profile_exists(dev, &sp, &pos)) {
			if (!subport_profile_free_exists(dev, &pos))
				return -1;

			subport_profile_install(dev, &sp, pos);
		}

		subport_profile_mark(dev, subport_id, pos);

		subport_id++;
	}

	return 0;
}

static int
hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_shared_shaper_list *ssl = &h->shared_shapers;
	struct tm_wred_profile_list *wpl = &h->wred_profiles;
	struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
	struct tm_shared_shaper *ss;

	uint32_t n_pipes_per_subport;

	/* Root node exists. */
	if (nr == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* There is at least one subport, max is not exceeded. */
	if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* There is at least one pipe. */
	if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Number of pipes is the same for all subports. Maximum number of
	 * pipes per subport is not exceeded.
	 */
	n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

	if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	TAILQ_FOREACH(ns, nl, node) {
		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		if (ns->n_children != n_pipes_per_subport)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Each pipe has exactly 13 TCs, with exactly one TC for each priority */
	TAILQ_FOREACH(np, nl, node) {
		uint32_t mask = 0, mask_expected =
			RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
				uint32_t);

		if (np->level != TM_NODE_LEVEL_PIPE)
			continue;

		if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));

		/* Each child TC of this pipe sets the bit of its priority. */
		TAILQ_FOREACH(nt, nl, node) {
			if (nt->level != TM_NODE_LEVEL_TC ||
				nt->parent_node_id != np->node_id)
				continue;

			mask |= 1 << nt->priority;
		}

		if (mask != mask_expected)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Each strict priority TC has exactly one packet queue, while the
	 * lowest priority TC (best-effort) has 4 (RTE_SCHED_BE_QUEUES_PER_PIPE)
	 * queues.
	 */
	TAILQ_FOREACH(nt, nl, node) {
		if (nt->level != TM_NODE_LEVEL_TC)
			continue;

		if (nt->n_children != 1 &&
			nt->n_children != RTE_SCHED_BE_QUEUES_PER_PIPE)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Shared shapers:
	 * -For each TC #i, all pipes in the same subport use the same
	 *  shared shaper (or no shared shaper) for their TC#i.
	 * -Each shared shaper needs to have at least one user. All its
	 *  users have to be TC nodes with the same priority and the same
	 *  subport.
	 */
	TAILQ_FOREACH(ns, nl, node) {
		struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
		uint32_t id;

		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
			s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);

		TAILQ_FOREACH(nt, nl, node) {
			struct tm_shared_shaper *subport_ss, *tc_ss;

			if (nt->level != TM_NODE_LEVEL_TC ||
				nt->parent_node->parent_node_id !=
					ns->node_id)
				continue;

			subport_ss = s[nt->priority];
			tc_ss = tm_tc_shared_shaper_get(dev, nt);

			if (subport_ss == NULL && tc_ss == NULL)
				continue;

			if ((subport_ss == NULL && tc_ss != NULL) ||
				(subport_ss != NULL && tc_ss == NULL) ||
				subport_ss->shared_shaper_id !=
					tc_ss->shared_shaper_id)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));
		}
	}

	TAILQ_FOREACH(ss, ssl, node) {
		struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
		uint32_t n_users = 0;

		if (nt_any != NULL)
			TAILQ_FOREACH(nt, nl, node) {
				if (nt->level != TM_NODE_LEVEL_TC ||
					nt->priority != nt_any->priority ||
					nt->parent_node->parent_node_id !=
						nt_any->parent_node->parent_node_id)
					continue;

				n_users++;
			}

		if (ss->n_users == 0 || ss->n_users != n_users)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Not too many subport profiles. */
	if (subport_profiles_generate(dev))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	/* Not too many pipe profiles. */
	if (pipe_profiles_generate(dev))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	/* WRED (when used, i.e. at least one WRED profile defined):
	 * -Each WRED profile must have at least one user.
	 * -All leaf nodes must have their private WRED context enabled.
	 * -For each TC #i, all leaf nodes must use the same WRED profile
	 *  for their private WRED context.
	 */
	if (h->n_wred_profiles) {
		struct tm_wred_profile *wp;
		struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
		uint32_t id;

		TAILQ_FOREACH(wp, wpl, node)
			if (wp->n_users == 0)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));

		for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
			w[id] = tm_tc_wred_profile_get(dev, id);

			if (w[id] == NULL)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));
		}

		TAILQ_FOREACH(nq, nl, node) {
			uint32_t id;

			if (nq->level != TM_NODE_LEVEL_QUEUE)
				continue;

			id = nq->parent_node->priority;

			if (nq->wred_profile == NULL ||
				nq->wred_profile->wred_profile_id !=
					w[id]->wred_profile_id)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));
		}
	}

	return 0;
}
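
/* Once the hierarchy passes the commit check, it is translated into the
 * rte_sched "blueprints" (the port and subport parameter structures held in
 * struct tm_params); these are consumed later, when the traffic manager
 * port is actually created (see softnic_tmgr_port_create()).
 */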

static void
hierarchy_blueprints_create(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;
	struct tm_hierarchy *h = &p->soft.tm.h;

	struct tm_node_list *nl = &h->nodes;
	struct tm_node *root = tm_root_node_present(dev), *n;

	uint32_t subport_id;

	t->port_params = (struct rte_sched_port_params) {
		.name = dev->data->name,
		.socket = dev->data->numa_node,
		.rate = root->shaper_profile->params.peak.rate,
		.mtu = dev->data->mtu,
		.frame_overhead =
			root->shaper_profile->params.pkt_length_adjust,
		.n_subports_per_port = root->n_children,
		.n_subport_profiles = t->n_subport_profiles,
		.subport_profiles = t->subport_profile,
		.n_max_subport_profiles = TM_MAX_SUBPORT_PROFILE,
		.n_pipes_per_subport = TM_MAX_PIPES_PER_SUBPORT,
	};

	subport_id = 0;
	TAILQ_FOREACH(n, nl, node) {
		if (n->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		t->subport_params[subport_id] =
			(struct rte_sched_subport_params) {
				.n_pipes_per_subport_enabled =
					h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
					h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
				.qsize = {p->params.tm.qsize[0],
					p->params.tm.qsize[1],
					p->params.tm.qsize[2],
					p->params.tm.qsize[3],
					p->params.tm.qsize[4],
					p->params.tm.qsize[5],
					p->params.tm.qsize[6],
					p->params.tm.qsize[7],
					p->params.tm.qsize[8],
					p->params.tm.qsize[9],
					p->params.tm.qsize[10],
					p->params.tm.qsize[11],
					p->params.tm.qsize[12],
				},
				.pipe_profiles = t->pipe_profiles,
				.n_pipe_profiles = t->n_pipe_profiles,
				.n_max_pipe_profiles = TM_MAX_PIPE_PROFILE,
			};

		wred_profiles_set(dev, subport_id);
		subport_id++;
	}
}

/* Traffic manager hierarchy commit */
static int
pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
	int clear_on_fail,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	int status;

	/* Checks */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	status = hierarchy_commit_check(dev, error);
	if (status) {
		if (clear_on_fail)
			tm_hierarchy_free(p);

		return status;
	}

	/* Create blueprints */
	hierarchy_blueprints_create(dev);

	/* Freeze hierarchy */
	p->soft.tm.hierarchy_frozen = 1;

	return 0;
}
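
/* Illustrative call flow (a sketch, not part of this driver): an
 * application reaches this handler through the generic rte_tm API, e.g.:
 *
 *	struct rte_tm_error error;
 *
 *	if (rte_tm_hierarchy_commit(port_id, 1, &error))
 *		printf("commit failed: %s\n", error.message);
 *
 * Here clear_on_fail = 1 asks the driver to free the staged hierarchy when
 * the commit check fails, which is what the tm_hierarchy_free() call above
 * implements.
 */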

#ifdef RTE_SCHED_SUBPORT_TC_OV

static int
update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tc_ov_weight = (uint8_t)weight;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	np->weight = weight;

	return 0;
}

#endif
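
/* update_queue_weight() below and the rate-update helpers further down all
 * follow the same pattern as update_pipe_weight() above: copy the profile
 * currently used by the node, patch the single field being changed, and
 * apply the update only if an identical profile already exists in the
 * profile set frozen at port configuration time.
 */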

static int
update_queue_weight(struct rte_eth_dev *dev,
	struct tm_node *nq, uint32_t weight)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t queue_id = tm_node_queue_id(dev, nq);

	struct tm_node *nt = nq->parent_node;

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	uint32_t pipe_be_queue_id =
		queue_id - RTE_SCHED_TRAFFIC_CLASS_BE;

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.wrr_weights[pipe_be_queue_id] = (uint8_t)weight;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	nq->weight = weight;

	return 0;
}

/* Traffic manager node parent update */
static int
pmd_tm_node_parent_update(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	struct rte_tm_error *error)
{
	struct tm_node *n;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Parent node must be the same */
	if (n->parent_node_id != parent_node_id)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be the same */
	if (n->priority != priority)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight: must be non-zero and strictly less than UINT8_MAX */
	if (weight == 0 || weight >= UINT8_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Only pipe and queue level weights can be updated. */
	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	case TM_NODE_LEVEL_SUBPORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	case TM_NODE_LEVEL_PIPE:
#ifdef RTE_SCHED_SUBPORT_TC_OV
		if (update_pipe_weight(dev, n, weight))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
#else
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
#endif

	case TM_NODE_LEVEL_TC:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	case TM_NODE_LEVEL_QUEUE:
	default:
		if (update_queue_weight(dev, n, weight))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	}
}

static int
update_subport_rate(struct rte_eth_dev *dev,
	struct tm_node *ns,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_subport_profile_params *profile0 =
		subport_profile_get(dev, ns);
	struct rte_sched_subport_profile_params profile1;
	uint32_t subport_profile_id;

	/* Derive new subport profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tb_rate = sp->params.peak.rate;
	profile1.tb_size = sp->params.peak.size;

	/* Since the implementation does not allow adding more subport profiles
	 * after port configuration, the subport configuration can be
	 * successfully updated only if the new profile is also part of the
	 * existing set of subport profiles.
	 */
	if (subport_profile_exists(dev, &profile1, &subport_profile_id) == 0)
		return -1;

	/* Update the subport configuration. */
	if (rte_sched_subport_config(SCHED(p), subport_id,
		NULL, subport_profile_id))
		return -1;

	/* Commit changes. */
	ns->shaper_profile->n_users--;

	ns->shaper_profile = sp;
	ns->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	subport_profile_mark(dev, subport_id, subport_profile_id);

	memcpy(&p->soft.tm.params.subport_profile[subport_profile_id],
		&profile1,
		sizeof(profile1));

	return 0;
}

static int
update_pipe_rate(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tb_rate = sp->params.peak.rate;
	profile1.tb_size = sp->params.peak.size;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	np->shaper_profile->n_users--;
	np->shaper_profile = sp;
	np->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	return 0;
}

static int
update_tc_rate(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tc_rate[tc_id] = sp->params.peak.rate;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is also part of the existing set of
	 * pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	nt->shaper_profile->n_users--;
	nt->shaper_profile = sp;
	nt->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	return 0;
}

/* Traffic manager node shaper update */
static int
pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct tm_node *n;
	struct tm_shaper_profile *sp;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper profile must be valid. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	case TM_NODE_LEVEL_SUBPORT:
		if (update_subport_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_PIPE:
		if (update_pipe_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_TC:
		if (update_tc_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_QUEUE:
	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));
	}
}

static inline uint32_t
tm_port_queue_id(struct rte_eth_dev *dev,
	uint32_t port_subport_id,
	uint32_t subport_pipe_id,
	uint32_t pipe_tc_id,
	uint32_t tc_queue_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

	uint32_t port_pipe_id =
		port_subport_id * n_pipes_per_subport + subport_pipe_id;

	uint32_t port_queue_id =
		port_pipe_id * RTE_SCHED_QUEUES_PER_PIPE + pipe_tc_id +
		tc_queue_id;

	return port_queue_id;
}
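
/* Worked example of the queue numbering above (hypothetical sizes): with
 * 8 pipes per subport, subport 2, pipe 3, the best-effort TC
 * (pipe_tc_id = RTE_SCHED_TRAFFIC_CLASS_BE = 12) and tc_queue_id = 1:
 * port_pipe_id = 2 * 8 + 3 = 19, and since each pipe owns
 * RTE_SCHED_QUEUES_PER_PIPE = 16 consecutive queues,
 * port_queue_id = 19 * 16 + 12 + 1 = 317. Strict priority TCs pass
 * tc_queue_id = 0, so their single queue sits at offset pipe_tc_id.
 */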

static int
read_port_stats(struct rte_eth_dev *dev,
	struct tm_node *nr,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
	uint32_t subport_id;

	for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
		struct rte_sched_subport_stats s;
		uint32_t tc_ov, id;

		/* Stats read */
		int status = rte_sched_subport_read_stats(SCHED(p),
			subport_id,
			&s,
			&tc_ov);
		if (status)
			return status;

		/* Stats accumulate */
		for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
			nr->stats.n_pkts +=
				s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
			nr->stats.n_bytes +=
				s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
			nr->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
				s.n_pkts_tc_dropped[id];
			nr->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
				s.n_bytes_tc_dropped[id];
		}
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &nr->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&nr->stats, 0, sizeof(nr->stats));

	return 0;
}
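
/* Note on the color indexing used by the stats readers in this file: the
 * rte_sched counters consumed here are aggregate drop counters, so all
 * drops are accounted against RTE_COLOR_GREEN in the rte_tm per-color
 * arrays, while the other color buckets stay at zero.
 */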

static int
read_subport_stats(struct rte_eth_dev *dev,
	struct tm_node *ns,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	struct rte_sched_subport_stats s;
	uint32_t tc_ov, tc_id;

	/* Stats read */
	int status = rte_sched_subport_read_stats(SCHED(p),
		subport_id,
		&s,
		&tc_ov);
	if (status)
		return status;

	/* Stats accumulate */
	for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
		ns->stats.n_pkts +=
			s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
		ns->stats.n_bytes +=
			s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
		ns->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
			s.n_pkts_tc_dropped[tc_id];
		ns->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
			s.n_bytes_tc_dropped[tc_id];
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &ns->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&ns->stats, 0, sizeof(ns->stats));

	return 0;
}

static int
read_pipe_stats(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;

	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	uint32_t tc_id, queue_id;
	uint32_t i;

	/* Stats read */
	for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
		struct rte_sched_queue_stats s;
		uint16_t qlen;

		/* Map the pipe-level queue index to (TC, queue): strict
		 * priority TCs own one queue each, the best-effort TC owns
		 * the remaining queues.
		 */
		if (i < RTE_SCHED_TRAFFIC_CLASS_BE) {
			tc_id = i;
			queue_id = 0;
		} else {
			tc_id = RTE_SCHED_TRAFFIC_CLASS_BE;
			queue_id = i - tc_id;
		}

		uint32_t qid = tm_port_queue_id(dev,
			subport_id,
			pipe_id,
			tc_id,
			queue_id);

		int status = rte_sched_queue_read_stats(SCHED(p),
			qid,
			&s,
			&qlen);
		if (status)
			return status;

		/* Stats accumulate */
		np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
		np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
		np->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
			s.n_pkts_dropped;
		np->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
			s.n_bytes_dropped;
		np->stats.leaf.n_pkts_queued = qlen;
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &np->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&np->stats, 0, sizeof(np->stats));

	return 0;
}

static int
read_tc_stats(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;

	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	struct rte_sched_queue_stats s;
	uint32_t qid, i;
	uint16_t qlen;
	int status;

	/* Stats read */
	if (tc_id < RTE_SCHED_TRAFFIC_CLASS_BE) {
		/* Strict priority TC: single queue. */
		qid = tm_port_queue_id(dev,
			subport_id,
			pipe_id,
			tc_id,
			0);

		status = rte_sched_queue_read_stats(SCHED(p),
			qid,
			&s,
			&qlen);
		if (status)
			return status;

		/* Stats accumulate */
		nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
		nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
		nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
			s.n_pkts_dropped;
		nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
			s.n_bytes_dropped;
		nt->stats.leaf.n_pkts_queued = qlen;
	} else {
		/* Best-effort TC: accumulate over all of its queues. */
		for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
			qid = tm_port_queue_id(dev,
				subport_id,
				pipe_id,
				tc_id,
				i);

			status = rte_sched_queue_read_stats(SCHED(p),
				qid,
				&s,
				&qlen);
			if (status)
				return status;

			/* Stats accumulate */
			nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
			nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
			nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
				s.n_pkts_dropped;
			nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
				s.n_bytes_dropped;
			nt->stats.leaf.n_pkts_queued = qlen;
		}
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &nt->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&nt->stats, 0, sizeof(nt->stats));

	return 0;
}

static int
read_queue_stats(struct rte_eth_dev *dev,
	struct tm_node *nq,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct rte_sched_queue_stats s;
	uint16_t qlen;

	uint32_t queue_id = tm_node_queue_id(dev, nq);

	struct tm_node *nt = nq->parent_node;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	/* Stats read */
	uint32_t qid = tm_port_queue_id(dev,
		subport_id,
		pipe_id,
		tc_id,
		queue_id);

	int status = rte_sched_queue_read_stats(SCHED(p),
		qid,
		&s,
		&qlen);
	if (status)
		return status;

	/* Stats accumulate */
	nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
	nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
	nq->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
		s.n_pkts_dropped;
	nq->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
		s.n_bytes_dropped;
	nq->stats.leaf.n_pkts_queued = qlen;

	/* Stats copy */
	if (stats)
		memcpy(stats, &nq->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_QUEUE;

	/* Stats clear */
	if (clear)
		memset(&nq->stats, 0, sizeof(nq->stats));

	return 0;
}

/* Traffic manager read stats counters for specific node */
static int
pmd_tm_node_stats_read(struct rte_eth_dev *dev,
	uint32_t node_id,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear,
	struct rte_tm_error *error)
{
	struct tm_node *n;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		if (read_port_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_SUBPORT:
		if (read_subport_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_PIPE:
		if (read_pipe_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_TC:
		if (read_tc_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_QUEUE:
	default:
		if (read_queue_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	}
}

const struct rte_tm_ops pmd_tm_ops = {
	.node_type_get = pmd_tm_node_type_get,
	.capabilities_get = pmd_tm_capabilities_get,
	.level_capabilities_get = pmd_tm_level_capabilities_get,
	.node_capabilities_get = pmd_tm_node_capabilities_get,

	.wred_profile_add = pmd_tm_wred_profile_add,
	.wred_profile_delete = pmd_tm_wred_profile_delete,
	.shared_wred_context_add_update = NULL,
	.shared_wred_context_delete = NULL,

	.shaper_profile_add = pmd_tm_shaper_profile_add,
	.shaper_profile_delete = pmd_tm_shaper_profile_delete,
	.shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
	.shared_shaper_delete = pmd_tm_shared_shaper_delete,

	.node_add = pmd_tm_node_add,
	.node_delete = pmd_tm_node_delete,
	.node_suspend = NULL,
	.node_resume = NULL,
	.hierarchy_commit = pmd_tm_hierarchy_commit,

	.node_parent_update = pmd_tm_node_parent_update,
	.node_shaper_update = pmd_tm_node_shaper_update,
	.node_shared_shaper_update = NULL,
	.node_stats_update = NULL,
	.node_wfq_weight_mode_update = NULL,
	.node_cman_update = NULL,
	.node_wred_context_update = NULL,
	.node_shared_wred_context_update = NULL,

	.node_stats_read = pmd_tm_node_stats_read,
};
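
/* A NULL entry above means the corresponding rte_tm operation is not
 * implemented by this driver and is rejected by the generic rte_tm layer.
 * This table is presumably handed back to the ethdev layer through the
 * driver's tm_ops_get callback, so every rte_tm_*() call made on a softnic
 * port dispatches into the handlers defined in this file.
 */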