/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_malloc.h>

#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"

#define BYTES_IN_MBPS		(1000 * 1000 / 8)
#define SUBPORT_TC_PERIOD	10
#define PIPE_TC_PERIOD		40
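/*
 * SUBPORT_TC_PERIOD and PIPE_TC_PERIOD feed the tc_period fields of
 * struct rte_sched_subport_params and struct rte_sched_pipe_params below,
 * i.e. the interval (in milliseconds) at which librte_sched refills the
 * traffic class credits. BYTES_IN_MBPS converts a rate given in Mbps to
 * the bytes-per-second unit that librte_sched expects.
 */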
void
tm_hierarchy_init(struct pmd_internals *p)
{
	memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));

	/* Initialize shaper profile list */
	TAILQ_INIT(&p->soft.tm.h.shaper_profiles);

	/* Initialize shared shaper list */
	TAILQ_INIT(&p->soft.tm.h.shared_shapers);

	/* Initialize wred profile list */
	TAILQ_INIT(&p->soft.tm.h.wred_profiles);

	/* Initialize TM node list */
	TAILQ_INIT(&p->soft.tm.h.nodes);
}
void
tm_hierarchy_uninit(struct pmd_internals *p)
{
	/* Remove all nodes */
	for ( ; ; ) {
		struct tm_node *tm_node;

		tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
		if (tm_node == NULL)
			break;
		TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
		free(tm_node);
	}

	/* Remove all WRED profiles */
	for ( ; ; ) {
		struct tm_wred_profile *wred_profile;

		wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
		if (wred_profile == NULL)
			break;
		TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
		free(wred_profile);
	}

	/* Remove all shared shapers */
	for ( ; ; ) {
		struct tm_shared_shaper *shared_shaper;

		shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
		if (shared_shaper == NULL)
			break;
		TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
		free(shared_shaper);
	}

	/* Remove all shaper profiles */
	for ( ; ; ) {
		struct tm_shaper_profile *shaper_profile;

		shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
		if (shaper_profile == NULL)
			break;
		TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
			shaper_profile, node);
		free(shaper_profile);
	}

	memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
}
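/*
 * Teardown releases objects in reverse dependency order: nodes first, then
 * WRED profiles, shared shapers and finally shaper profiles, so no object
 * is freed while another one still holds a reference to it.
 */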
int
tm_init(struct pmd_internals *p,
	struct pmd_params *params __rte_unused,
	int numa_node __rte_unused)
{
	tm_hierarchy_init(p);

	return 0;
}

void
tm_free(struct pmd_internals *p)
{
	tm_hierarchy_uninit(p);
}
int
tm_start(struct pmd_internals *p)
{
	struct tm_params *t = &p->soft.tm.params;
	uint32_t n_subports, subport_id;
	int status;

	/* Is hierarchy frozen? */
	if (p->soft.tm.hierarchy_frozen == 0)
		return -1;

	/* Port */
	p->soft.tm.sched = rte_sched_port_config(&t->port_params);
	if (p->soft.tm.sched == NULL)
		return -1;

	/* Subport */
	n_subports = t->port_params.n_subports_per_port;
	for (subport_id = 0; subport_id < n_subports; subport_id++) {
		uint32_t n_pipes_per_subport =
			t->port_params.n_pipes_per_subport;
		uint32_t pipe_id;

		status = rte_sched_subport_config(p->soft.tm.sched,
			subport_id,
			&t->subport_params[subport_id]);
		if (status) {
			rte_sched_port_free(p->soft.tm.sched);
			return -1;
		}

		/* Pipe */
		n_pipes_per_subport = t->port_params.n_pipes_per_subport;
		for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
			int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
				pipe_id;
			int profile_id = t->pipe_to_profile[pos];

			if (profile_id < 0)
				continue;

			status = rte_sched_pipe_config(p->soft.tm.sched,
				subport_id,
				pipe_id,
				profile_id);
			if (status) {
				rte_sched_port_free(p->soft.tm.sched);
				return -1;
			}
		}
	}

	return 0;
}
void
tm_stop(struct pmd_internals *p)
{
	if (p->soft.tm.sched)
		rte_sched_port_free(p->soft.tm.sched);

	/* Unfreeze hierarchy */
	p->soft.tm.hierarchy_frozen = 0;
}
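/*
 * tm_start() and tm_stop() are the hooks used by the soft NIC device
 * start/stop paths: tm_start() instantiates a librte_sched port from the
 * frozen hierarchy blueprints, while tm_stop() frees it and unfreezes the
 * hierarchy so it can be modified and committed again.
 */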
static struct tm_shaper_profile *
tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
	struct tm_shaper_profile *sp;

	TAILQ_FOREACH(sp, spl, node)
		if (shaper_profile_id == sp->shaper_profile_id)
			return sp;

	return NULL;
}

static struct tm_shared_shaper *
tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
	struct tm_shared_shaper *ss;

	TAILQ_FOREACH(ss, ssl, node)
		if (shared_shaper_id == ss->shared_shaper_id)
			return ss;

	return NULL;
}

static struct tm_wred_profile *
tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
	struct tm_wred_profile *wp;

	TAILQ_FOREACH(wp, wpl, node)
		if (wred_profile_id == wp->wred_profile_id)
			return wp;

	return NULL;
}

static struct tm_node *
tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node)
		if (n->node_id == node_id)
			return n;

	return NULL;
}

static struct tm_node *
tm_root_node_present(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node)
		if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
			return n;

	return NULL;
}
static uint32_t
tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *ns;
	uint32_t subport_id;

	subport_id = 0;
	TAILQ_FOREACH(ns, nl, node) {
		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		if (ns->node_id == subport_node->node_id)
			return subport_id;

		subport_id++;
	}

	return UINT32_MAX;
}

static uint32_t
tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *np;
	uint32_t pipe_id;

	pipe_id = 0;
	TAILQ_FOREACH(np, nl, node) {
		if (np->level != TM_NODE_LEVEL_PIPE ||
			np->parent_node_id != pipe_node->parent_node_id)
			continue;

		if (np->node_id == pipe_node->node_id)
			return pipe_id;

		pipe_id++;
	}

	return UINT32_MAX;
}

static uint32_t
tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
{
	return tc_node->priority;
}

static uint32_t
tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *nq;
	uint32_t queue_id;

	queue_id = 0;
	TAILQ_FOREACH(nq, nl, node) {
		if (nq->level != TM_NODE_LEVEL_QUEUE ||
			nq->parent_node_id != queue_node->parent_node_id)
			continue;

		if (nq->node_id == queue_node->node_id)
			return queue_id;

		queue_id++;
	}

	return UINT32_MAX;
}
static uint32_t
tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t n_queues_max = p->params.tm.n_queues;
	uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
	uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
	uint32_t n_subports_max = n_pipes_max;
	uint32_t n_root_max = 1;

	switch (level) {
	case TM_NODE_LEVEL_PORT:
		return n_root_max;
	case TM_NODE_LEVEL_SUBPORT:
		return n_subports_max;
	case TM_NODE_LEVEL_PIPE:
		return n_pipes_max;
	case TM_NODE_LEVEL_TC:
		return n_tc_max;
	case TM_NODE_LEVEL_QUEUE:
	default:
		return n_queues_max;
	}
}
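/*
 * Worked example, assuming a port configured with 4096 scheduler queues:
 * 4096 queues / 4 queues per TC = 1024 TCs, 1024 / 4 TCs per pipe = 256
 * pipes, and at most 256 subports (the degenerate one-pipe-per-subport
 * case), all under a single port-level root node.
 */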
/* Traffic manager node type get */
static int
pmd_tm_node_type_get(struct rte_eth_dev *dev,
	uint32_t node_id,
	int *is_leaf,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	if (is_leaf == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	if (node_id == RTE_TM_NODE_ID_NULL ||
		(tm_node_search(dev, node_id) == NULL))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	*is_leaf = node_id < p->params.tm.n_queues;

	return 0;
}
#ifdef RTE_SCHED_RED
#define WRED_SUPPORTED		1
#else
#define WRED_SUPPORTED		0
#endif

#define STATS_MASK_DEFAULT			\
	(RTE_TM_STATS_N_PKTS |			\
	RTE_TM_STATS_N_BYTES |			\
	RTE_TM_STATS_N_PKTS_GREEN_DROPPED |	\
	RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

#define STATS_MASK_QUEUE			\
	(STATS_MASK_DEFAULT |			\
	RTE_TM_STATS_N_PKTS_QUEUED)
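/*
 * Only the GREEN drop counters appear in the masks above because
 * librte_sched's per-queue counters do not distinguish packet colors, so
 * the driver reports all drops under the GREEN color; queue occupancy is
 * likewise only available as a packet count.
 */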
static const struct rte_tm_capabilities tm_cap = {
	.n_nodes_max = UINT32_MAX,
	.n_levels_max = TM_NODE_LEVEL_MAX,

	.non_leaf_nodes_identical = 0,
	.leaf_nodes_identical = 1,

	.shaper_n_max = UINT32_MAX,
	.shaper_private_n_max = UINT32_MAX,
	.shaper_private_dual_rate_n_max = 0,
	.shaper_private_rate_min = 1,
	.shaper_private_rate_max = UINT32_MAX,

	.shaper_shared_n_max = UINT32_MAX,
	.shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
	.shaper_shared_n_shapers_per_node_max = 1,
	.shaper_shared_dual_rate_n_max = 0,
	.shaper_shared_rate_min = 1,
	.shaper_shared_rate_max = UINT32_MAX,

	.shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
	.shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,

	.sched_n_children_max = UINT32_MAX,
	.sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
	.sched_wfq_n_children_per_group_max = UINT32_MAX,
	.sched_wfq_n_groups_max = 1,
	.sched_wfq_weight_max = UINT32_MAX,

	.cman_wred_packet_mode_supported = WRED_SUPPORTED,
	.cman_wred_byte_mode_supported = 0,
	.cman_head_drop_supported = 0,
	.cman_wred_context_n_max = 0,
	.cman_wred_context_private_n_max = 0,
	.cman_wred_context_shared_n_max = 0,
	.cman_wred_context_shared_n_nodes_per_context_max = 0,
	.cman_wred_context_shared_n_contexts_per_node_max = 0,

	.mark_vlan_dei_supported = {0, 0, 0},
	.mark_ip_ecn_tcp_supported = {0, 0, 0},
	.mark_ip_ecn_sctp_supported = {0, 0, 0},
	.mark_ip_dscp_supported = {0, 0, 0},

	.dynamic_update_mask = 0,

	.stats_mask = STATS_MASK_QUEUE,
};
/* Traffic manager capabilities get */
static int
pmd_tm_capabilities_get(struct rte_eth_dev *dev,
	struct rte_tm_capabilities *cap,
	struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_cap, sizeof(*cap));

	cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

	cap->shaper_private_n_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);

	cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);

	cap->shaper_n_max = cap->shaper_private_n_max +
		cap->shaper_shared_n_max;

	cap->shaper_shared_n_nodes_per_shaper_max =
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);

	cap->sched_n_children_max = RTE_MAX(
		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
		(uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);

	cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;

	if (WRED_SUPPORTED)
		cap->cman_wred_context_private_n_max =
			tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);

	cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
		cap->cman_wred_context_shared_n_max;

	return 0;
}
static const struct rte_tm_level_capabilities tm_level_cap[] = {
	[TM_NODE_LEVEL_PORT] = {
		.n_nodes_max = 1,
		.n_nodes_nonleaf_max = 1,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 0,

			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = 1,

			.stats_mask = STATS_MASK_DEFAULT,
		},
	},

	[TM_NODE_LEVEL_SUBPORT] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 0,

			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
#ifdef RTE_SCHED_SUBPORT_TC_OV
			.sched_wfq_weight_max = UINT32_MAX,
#else
			.sched_wfq_weight_max = 1,
#endif

			.stats_mask = STATS_MASK_DEFAULT,
		},
	},

	[TM_NODE_LEVEL_PIPE] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 0,

			.sched_n_children_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_sp_n_priorities_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_wfq_n_children_per_group_max = 1,
			.sched_wfq_n_groups_max = 0,
			.sched_wfq_weight_max = 1,

			.stats_mask = STATS_MASK_DEFAULT,
		},
	},

	[TM_NODE_LEVEL_TC] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = UINT32_MAX,
		.n_nodes_leaf_max = 0,
		.non_leaf_nodes_identical = 1,
		.leaf_nodes_identical = 0,

		.nonleaf = {
			.shaper_private_supported = 1,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 1,
			.shaper_private_rate_max = UINT32_MAX,
			.shaper_shared_n_max = 1,

			.sched_n_children_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,

			.stats_mask = STATS_MASK_DEFAULT,
		},
	},

	[TM_NODE_LEVEL_QUEUE] = {
		.n_nodes_max = UINT32_MAX,
		.n_nodes_nonleaf_max = 0,
		.n_nodes_leaf_max = UINT32_MAX,
		.non_leaf_nodes_identical = 0,
		.leaf_nodes_identical = 1,

		.leaf = {
			.shaper_private_supported = 0,
			.shaper_private_dual_rate_supported = 0,
			.shaper_private_rate_min = 0,
			.shaper_private_rate_max = 0,
			.shaper_shared_n_max = 0,

			.cman_head_drop_supported = 0,
			.cman_wred_packet_mode_supported = WRED_SUPPORTED,
			.cman_wred_byte_mode_supported = 0,
			.cman_wred_context_private_supported = WRED_SUPPORTED,
			.cman_wred_context_shared_n_max = 0,

			.stats_mask = STATS_MASK_QUEUE,
		},
	},
};
/* Traffic manager level capabilities get */
static int
pmd_tm_level_capabilities_get(struct rte_eth_dev *dev,
	uint32_t level_id,
	struct rte_tm_level_capabilities *cap,
	struct rte_tm_error *error)
{
	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	if (level_id >= TM_NODE_LEVEL_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));

	switch (level_id) {
	case TM_NODE_LEVEL_PORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_SUBPORT);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_SUBPORT);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_PIPE);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_PIPE:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_PIPE);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		break;

	case TM_NODE_LEVEL_TC:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_TC);
		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
		break;

	case TM_NODE_LEVEL_QUEUE:
	default:
		cap->n_nodes_max = tm_level_get_max_nodes(dev,
			TM_NODE_LEVEL_QUEUE);
		cap->n_nodes_leaf_max = cap->n_nodes_max;
		break;
	}

	return 0;
}
static const struct rte_tm_node_capabilities tm_node_cap[] = {
	[TM_NODE_LEVEL_PORT] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 0,

		.nonleaf = {
			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = 1,
		},

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_SUBPORT] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 0,

		.nonleaf = {
			.sched_n_children_max = UINT32_MAX,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max = UINT32_MAX,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
		},

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_PIPE] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 0,

		.nonleaf = {
			.sched_n_children_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_sp_n_priorities_max =
				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
			.sched_wfq_n_children_per_group_max = 1,
			.sched_wfq_n_groups_max = 0,
			.sched_wfq_weight_max = 1,
		},

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_TC] = {
		.shaper_private_supported = 1,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 1,
		.shaper_private_rate_max = UINT32_MAX,
		.shaper_shared_n_max = 1,

		.nonleaf = {
			.sched_n_children_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_sp_n_priorities_max = 1,
			.sched_wfq_n_children_per_group_max =
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			.sched_wfq_n_groups_max = 1,
			.sched_wfq_weight_max = UINT32_MAX,
		},

		.stats_mask = STATS_MASK_DEFAULT,
	},

	[TM_NODE_LEVEL_QUEUE] = {
		.shaper_private_supported = 0,
		.shaper_private_dual_rate_supported = 0,
		.shaper_private_rate_min = 0,
		.shaper_private_rate_max = 0,
		.shaper_shared_n_max = 0,

		.leaf = {
			.cman_head_drop_supported = 0,
			.cman_wred_packet_mode_supported = WRED_SUPPORTED,
			.cman_wred_byte_mode_supported = 0,
			.cman_wred_context_private_supported = WRED_SUPPORTED,
			.cman_wred_context_shared_n_max = 0,
		},

		.stats_mask = STATS_MASK_QUEUE,
	},
};
/* Traffic manager node capabilities get */
static int
pmd_tm_node_capabilities_get(struct rte_eth_dev *dev,
	uint32_t node_id,
	struct rte_tm_node_capabilities *cap,
	struct rte_tm_error *error)
{
	struct tm_node *tm_node;

	if (cap == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_CAPABILITIES,
			NULL,
			rte_strerror(EINVAL));

	tm_node = tm_node_search(dev, node_id);
	if (tm_node == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));

	switch (tm_node->level) {
	case TM_NODE_LEVEL_PORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_SUBPORT);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		cap->nonleaf.sched_n_children_max =
			tm_level_get_max_nodes(dev,
				TM_NODE_LEVEL_PIPE);
		cap->nonleaf.sched_wfq_n_children_per_group_max =
			cap->nonleaf.sched_n_children_max;
		break;

	case TM_NODE_LEVEL_PIPE:
	case TM_NODE_LEVEL_TC:
	case TM_NODE_LEVEL_QUEUE:
	default:
		break;
	}

	return 0;
}
static int
shaper_profile_check(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct tm_shaper_profile *sp;

	/* Shaper profile ID must not be NONE. */
	if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper profile must not exist. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak rate: non-zero, 32-bit */
	if (profile->peak.rate == 0 ||
		profile->peak.rate >= UINT32_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Peak size: non-zero, 32-bit */
	if (profile->peak.size == 0 ||
		profile->peak.size >= UINT32_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
			NULL,
			rte_strerror(EINVAL));

	/* Dual-rate profiles are not supported. */
	if (profile->committed.rate != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
			NULL,
			rte_strerror(EINVAL));

	/* Packet length adjust: 24 bytes */
	if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
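/*
 * Rates and sizes are in bytes per second and bytes respectively, matching
 * the tb_rate/tb_size token bucket parameters of librte_sched. For example,
 * a 10 Gbps shaper is peak.rate = 1250000000, which still fits the 32-bit
 * bound enforced above; BYTES_IN_MBPS can be used for the Mbps conversion.
 */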
/* Traffic manager shaper profile add */
static int
pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_shaper_params *profile,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
	struct tm_shaper_profile *sp;
	int status;

	/* Check input params */
	status = shaper_profile_check(dev, shaper_profile_id, profile, error);
	if (status)
		return status;

	/* Memory allocation */
	sp = calloc(1, sizeof(struct tm_shaper_profile));
	if (sp == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	sp->shaper_profile_id = shaper_profile_id;
	memcpy(&sp->params, profile, sizeof(sp->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(spl, sp, node);
	p->soft.tm.h.n_shaper_profiles++;

	return 0;
}
/* Traffic manager shaper profile delete */
static int
pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile *sp;

	/* Check existing */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (sp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
	p->soft.tm.h.n_shaper_profiles--;
	free(sp);

	return 0;
}
static struct tm_node *
tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
	struct tm_shared_shaper *ss)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	/* Subport: each TC uses shared shaper */
	TAILQ_FOREACH(n, nl, node) {
		if (n->level != TM_NODE_LEVEL_TC ||
			n->params.n_shared_shapers == 0 ||
			n->params.shared_shaper_id[0] != ss->shared_shaper_id)
			continue;

		return n;
	}

	return NULL;
}
static int
update_subport_tc_rate(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct tm_shared_shaper *ss,
	struct tm_shaper_profile *sp_new)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_subport_params subport_params;

	struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
		ss->shaper_profile_id);

	/* Derive new subport configuration. */
	memcpy(&subport_params,
		&p->soft.tm.params.subport_params[subport_id],
		sizeof(subport_params));
	subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;

	/* Update the subport configuration. */
	if (rte_sched_subport_config(p->soft.tm.sched,
		subport_id, &subport_params))
		return -1;

	/* Commit changes. */
	sp_old->n_users--;

	ss->shaper_profile_id = sp_new->shaper_profile_id;
	sp_new->n_users++;

	memcpy(&p->soft.tm.params.subport_params[subport_id],
		&subport_params,
		sizeof(subport_params));

	return 0;
}
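/*
 * The pattern above (copy the active configuration, patch one field, try
 * to apply it, and only then update the stored blueprint and the profile
 * reference counts) keeps p->soft.tm.params consistent with what
 * librte_sched is actually running, even when rte_sched_subport_config()
 * rejects the new rate.
 */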
/* Traffic manager shared shaper add/update */
static int
pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
	uint32_t shared_shaper_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper *ss;
	struct tm_shaper_profile *sp;
	struct tm_node *nt;

	/* Shaper profile must be valid. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/**
	 * Add new shared shaper
	 */
	ss = tm_shared_shaper_search(dev, shared_shaper_id);
	if (ss == NULL) {
		struct tm_shared_shaper_list *ssl =
			&p->soft.tm.h.shared_shapers;

		/* Hierarchy must not be frozen */
		if (p->soft.tm.hierarchy_frozen)
			return -rte_tm_error_set(error,
				EBUSY,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EBUSY));

		/* Memory allocation */
		ss = calloc(1, sizeof(struct tm_shared_shaper));
		if (ss == NULL)
			return -rte_tm_error_set(error,
				ENOMEM,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(ENOMEM));

		/* Fill in */
		ss->shared_shaper_id = shared_shaper_id;
		ss->shaper_profile_id = shaper_profile_id;

		/* Add to list */
		TAILQ_INSERT_TAIL(ssl, ss, node);
		p->soft.tm.h.n_shared_shapers++;

		return 0;
	}

	/**
	 * Update existing shared shaper
	 */
	/* Hierarchy must be frozen (run-time update) */
	if (p->soft.tm.hierarchy_frozen == 0)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Propagate change. */
	nt = tm_shared_shaper_get_tc(dev, ss);
	if (update_subport_tc_rate(dev, nt, ss, sp))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
/* Traffic manager shared shaper delete */
static int
pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
	uint32_t shared_shaper_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shared_shaper *ss;

	/* Check existing */
	ss = tm_shared_shaper_search(dev, shared_shaper_id);
	if (ss == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (ss->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
	p->soft.tm.h.n_shared_shapers--;
	free(ss);

	return 0;
}
static int
wred_profile_check(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct tm_wred_profile *wp;
	enum rte_tm_color color;

	/* WRED profile ID must not be NONE. */
	if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* WRED profile must not exist. */
	wp = tm_wred_profile_search(dev, wred_profile_id);
	if (wp)
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EEXIST));

	/* Profile must not be NULL. */
	if (profile == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	/* WRED profile should be in packet mode */
	if (profile->packet_mode == 0)
		return -rte_tm_error_set(error,
			ENOTSUP,
			RTE_TM_ERROR_TYPE_WRED_PROFILE,
			NULL,
			rte_strerror(ENOTSUP));

	/* min_th <= max_th, max_th > 0 */
	for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
		uint32_t min_th = profile->red_params[color].min_th;
		uint32_t max_th = profile->red_params[color].max_th;

		if (min_th > max_th ||
			max_th == 0 ||
			min_th > UINT16_MAX ||
			max_th > UINT16_MAX)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_WRED_PROFILE,
				NULL,
				rte_strerror(EINVAL));
	}

	return 0;
}
/* Traffic manager WRED profile add */
static int
pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_wred_params *profile,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
	struct tm_wred_profile *wp;
	int status;

	/* Check input params */
	status = wred_profile_check(dev, wred_profile_id, profile, error);
	if (status)
		return status;

	/* Memory allocation */
	wp = calloc(1, sizeof(struct tm_wred_profile));
	if (wp == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	wp->wred_profile_id = wred_profile_id;
	memcpy(&wp->params, profile, sizeof(wp->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(wpl, wp, node);
	p->soft.tm.h.n_wred_profiles++;

	return 0;
}
/* Traffic manager WRED profile delete */
static int
pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
	uint32_t wred_profile_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_wred_profile *wp;

	/* Check existing */
	wp = tm_wred_profile_search(dev, wred_profile_id);
	if (wp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (wp->n_users)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
	p->soft.tm.h.n_wred_profiles--;
	free(wp);

	return 0;
}
static int
node_add_check_port(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
		params->shaper_profile_id);

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_subport(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_pipe(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight __rte_unused,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Number of SP priorities must be 4 */
	if (params->nonleaf.n_sp_priorities !=
		RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* WFQ mode must be byte mode */
	if (params->nonleaf.wfq_weight_mode != NULL &&
		params->nonleaf.wfq_weight_mode[0] != 0 &&
		params->nonleaf.wfq_weight_mode[1] != 0 &&
		params->nonleaf.wfq_weight_mode[2] != 0 &&
		params->nonleaf.wfq_weight_mode[3] != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_tc(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority __rte_unused,
	uint32_t weight,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: non-leaf */
	if (node_id < p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be 1 */
	if (weight != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper must be valid */
	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
		(!tm_shaper_profile_search(dev, params->shaper_profile_id)))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Single valid shared shaper */
	if (params->n_shared_shapers > 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	if (params->n_shared_shapers == 1 &&
		(params->shared_shaper_id == NULL ||
		(!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Number of priorities must be 1 */
	if (params->nonleaf.n_sp_priorities != 1)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
			NULL,
			rte_strerror(EINVAL));

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_DEFAULT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check_queue(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id __rte_unused,
	uint32_t priority,
	uint32_t weight __rte_unused,
	uint32_t level_id __rte_unused,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* node type: leaf */
	if (node_id >= p->params.tm.n_queues)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be 0 */
	if (priority != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* No shaper */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* No shared shapers */
	if (params->n_shared_shapers != 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
			NULL,
			rte_strerror(EINVAL));

	/* Congestion management must not be head drop */
	if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
			NULL,
			rte_strerror(EINVAL));

	/* Congestion management set to WRED */
	if (params->leaf.cman == RTE_TM_CMAN_WRED) {
		uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
		struct tm_wred_profile *wp = tm_wred_profile_search(dev,
			wred_profile_id);

		/* WRED profile (for private WRED context) must be valid */
		if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
			wp == NULL)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
				NULL,
				rte_strerror(EINVAL));

		/* No shared WRED contexts */
		if (params->leaf.wred.n_shared_wred_contexts != 0)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Stats */
	if (params->stats_mask & ~STATS_MASK_QUEUE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
			NULL,
			rte_strerror(EINVAL));

	return 0;
}
static int
node_add_check(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct tm_node *pn = NULL;
	uint32_t level;
	int status;

	/* node_id, parent_node_id:
	 *    -node_id must not be RTE_TM_NODE_ID_NULL
	 *    -node_id must not be in use
	 *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
	 *        -root node must not exist
	 *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
	 *        -parent_node_id must be valid
	 */
	if (node_id == RTE_TM_NODE_ID_NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	if (tm_node_search(dev, node_id))
		return -rte_tm_error_set(error,
			EEXIST,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EEXIST));

	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		if (tm_root_node_present(dev))
			return -rte_tm_error_set(error,
				EEXIST,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EEXIST));
	} else {
		pn = tm_node_search(dev, parent_node_id);
		if (pn == NULL)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
				NULL,
				rte_strerror(EINVAL));
	}

	/* priority: must be 0 .. 3 */
	if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* weight: must be 1 .. 255 */
	if (weight == 0 || weight >= UINT8_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	/* level_id: if valid, then
	 *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
	 *        -level_id must be zero
	 *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
	 *        -level_id must be parent level ID plus one
	 */
	level = (pn == NULL) ? 0 : pn->level + 1;
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* params: must not be NULL */
	if (params == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARAMS,
			NULL,
			rte_strerror(EINVAL));

	/* params: per level checks */
	switch (level) {
	case TM_NODE_LEVEL_PORT:
		status = node_add_check_port(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_SUBPORT:
		status = node_add_check_subport(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_PIPE:
		status = node_add_check_pipe(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_TC:
		status = node_add_check_tc(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	case TM_NODE_LEVEL_QUEUE:
		status = node_add_check_queue(dev, node_id,
			parent_node_id, priority, weight, level_id,
			params, error);
		if (status)
			return status;
		break;

	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));
	}

	return 0;
}
/* Traffic manager node add */
static int
pmd_tm_node_add(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	uint32_t level_id,
	struct rte_tm_node_params *params,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;
	uint32_t i;
	int status;

	/* Checks */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	status = node_add_check(dev, node_id, parent_node_id, priority, weight,
		level_id, params, error);
	if (status)
		return status;

	/* Memory allocation */
	n = calloc(1, sizeof(struct tm_node));
	if (n == NULL)
		return -rte_tm_error_set(error,
			ENOMEM,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(ENOMEM));

	/* Fill in */
	n->node_id = node_id;
	n->parent_node_id = parent_node_id;
	n->priority = priority;
	n->weight = weight;

	if (parent_node_id != RTE_TM_NODE_ID_NULL) {
		n->parent_node = tm_node_search(dev, parent_node_id);
		n->level = n->parent_node->level + 1;
	}

	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
		n->shaper_profile = tm_shaper_profile_search(dev,
			params->shaper_profile_id);

	if (n->level == TM_NODE_LEVEL_QUEUE &&
		params->leaf.cman == RTE_TM_CMAN_WRED)
		n->wred_profile = tm_wred_profile_search(dev,
			params->leaf.wred.wred_profile_id);

	memcpy(&n->params, params, sizeof(n->params));

	/* Add to list */
	TAILQ_INSERT_TAIL(nl, n, node);
	p->soft.tm.h.n_nodes++;

	/* Update dependencies */
	if (n->parent_node)
		n->parent_node->n_children++;

	if (n->shaper_profile)
		n->shaper_profile->n_users++;

	for (i = 0; i < params->n_shared_shapers; i++) {
		struct tm_shared_shaper *ss;

		ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
		ss->n_users++;
	}

	if (n->wred_profile)
		n->wred_profile->n_users++;

	p->soft.tm.h.n_tm_nodes[n->level]++;

	return 0;
}
/* Traffic manager node delete */
static int
pmd_tm_node_delete(struct rte_eth_dev *dev,
	uint32_t node_id,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node *n;
	uint32_t i;

	/* Check hierarchy changes are currently allowed */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Check existing */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Check unused */
	if (n->n_children)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EBUSY));

	/* Update dependencies */
	p->soft.tm.h.n_tm_nodes[n->level]--;

	if (n->wred_profile)
		n->wred_profile->n_users--;

	for (i = 0; i < n->params.n_shared_shapers; i++) {
		struct tm_shared_shaper *ss;

		ss = tm_shared_shaper_search(dev,
			n->params.shared_shaper_id[i]);
		ss->n_users--;
	}

	if (n->shaper_profile)
		n->shaper_profile->n_users--;

	if (n->parent_node)
		n->parent_node->n_children--;

	/* Remove from list */
	TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
	p->soft.tm.h.n_nodes--;
	free(n);

	return 0;
}
static void
pipe_profile_build(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct rte_sched_pipe_params *pp)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_node *nt, *nq;

	memset(pp, 0, sizeof(*pp));

	/* Pipe */
	pp->tb_rate = np->shaper_profile->params.peak.rate;
	pp->tb_size = np->shaper_profile->params.peak.size;

	/* Traffic Class (TC) */
	pp->tc_period = PIPE_TC_PERIOD;

#ifdef RTE_SCHED_SUBPORT_TC_OV
	pp->tc_ov_weight = np->weight;
#endif

	TAILQ_FOREACH(nt, nl, node) {
		uint32_t queue_id = 0;

		if (nt->level != TM_NODE_LEVEL_TC ||
			nt->parent_node_id != np->node_id)
			continue;

		pp->tc_rate[nt->priority] =
			nt->shaper_profile->params.peak.rate;

		/* Queue */
		TAILQ_FOREACH(nq, nl, node) {
			uint32_t pipe_queue_id;

			if (nq->level != TM_NODE_LEVEL_QUEUE ||
				nq->parent_node_id != nt->node_id)
				continue;

			pipe_queue_id = nt->priority *
				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
			pp->wrr_weights[pipe_queue_id] = nq->weight;

			queue_id++;
		}
	}
}
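/*
 * Example of the queue indexing above: with 4 queues per TC, the queue at
 * position 1 of TC 2 maps to pipe queue 2 * 4 + 1 = 9 in the wrr_weights[]
 * array of struct rte_sched_pipe_params.
 */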
static int
pipe_profile_free_exists(struct rte_eth_dev *dev,
	uint32_t *pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;

	if (t->n_pipe_profiles < RTE_SCHED_PIPE_PROFILES_PER_PORT) {
		*pipe_profile_id = t->n_pipe_profiles;
		return 1;
	}

	return 0;
}
static int
pipe_profile_exists(struct rte_eth_dev *dev,
	struct rte_sched_pipe_params *pp,
	uint32_t *pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;
	uint32_t i;

	for (i = 0; i < t->n_pipe_profiles; i++)
		if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
			if (pipe_profile_id)
				*pipe_profile_id = i;
			return 1;
		}

	return 0;
}
static void
pipe_profile_install(struct rte_eth_dev *dev,
	struct rte_sched_pipe_params *pp,
	uint32_t pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;

	memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
	t->n_pipe_profiles++;
}
static void
pipe_profile_mark(struct rte_eth_dev *dev,
	uint32_t subport_id,
	uint32_t pipe_id,
	uint32_t pipe_profile_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_params *t = &p->soft.tm.params;
	uint32_t n_pipes_per_subport, pos;

	n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
	pos = subport_id * n_pipes_per_subport + pipe_id;

	t->pipe_to_profile[pos] = pipe_profile_id;
}
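/*
 * Indexing example: with 64 pipes per subport, pipe 3 of subport 2 is
 * stored at position 2 * 64 + 3 = 131. Note that tm_start() reads this
 * array back with a TM_MAX_PIPES_PER_SUBPORT stride, which matches the
 * layout written here only when every subport carries the maximum number
 * of pipes.
 */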
static struct rte_sched_pipe_params *
pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_params *t = &p->soft.tm.params;
	uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

	uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
	uint32_t pipe_profile_id = t->pipe_to_profile[pos];

	return &t->pipe_profiles[pipe_profile_id];
}
static int
pipe_profiles_generate(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_node *ns, *np;
	uint32_t subport_id;

	/* Objective: Fill in the following fields in struct tm_params:
	 *    - pipe_profiles
	 *    - n_pipe_profiles
	 *    - pipe_to_profile
	 */

	subport_id = 0;
	TAILQ_FOREACH(ns, nl, node) {
		uint32_t pipe_id;

		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		pipe_id = 0;
		TAILQ_FOREACH(np, nl, node) {
			struct rte_sched_pipe_params pp;
			uint32_t pos;

			if (np->level != TM_NODE_LEVEL_PIPE ||
				np->parent_node_id != ns->node_id)
				continue;

			pipe_profile_build(dev, np, &pp);

			if (!pipe_profile_exists(dev, &pp, &pos)) {
				if (!pipe_profile_free_exists(dev, &pos))
					return -1;

				pipe_profile_install(dev, &pp, pos);
			}

			pipe_profile_mark(dev, subport_id, pipe_id, pos);

			pipe_id++;
		}

		subport_id++;
	}

	return 0;
}
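/*
 * Pipe profiles are deduplicated above: identical pipe configurations
 * anywhere in the hierarchy share a single librte_sched profile slot, so
 * the RTE_SCHED_PIPE_PROFILES_PER_PORT limit constrains the number of
 * distinct pipe configurations, not the number of pipes.
 */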
static struct tm_wred_profile *
tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_node *nq;

	TAILQ_FOREACH(nq, nl, node) {
		if (nq->level != TM_NODE_LEVEL_QUEUE ||
			nq->parent_node->priority != tc_id)
			continue;

		return nq->wred_profile;
	}

	return NULL;
}
#ifdef RTE_SCHED_RED

static void
wred_profiles_set(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
	uint32_t tc_id;
	enum rte_tm_color color;

	for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
		for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
			struct rte_red_params *dst =
				&pp->red_params[tc_id][color];
			struct tm_wred_profile *src_wp =
				tm_tc_wred_profile_get(dev, tc_id);
			struct rte_tm_red_params *src =
				&src_wp->params.red_params[color];

			memcpy(dst, src, sizeof(*dst));
		}
}

#else

#define wred_profiles_set(dev)

#endif
static struct tm_shared_shaper *
tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
{
	return (tc_node->params.n_shared_shapers) ?
		tm_shared_shaper_search(dev,
			tc_node->params.shared_shaper_id[0]) :
		NULL;
}

static struct tm_shared_shaper *
tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
	struct tm_node *subport_node,
	uint32_t tc_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_node_list *nl = &p->soft.tm.h.nodes;
	struct tm_node *n;

	TAILQ_FOREACH(n, nl, node) {
		if (n->level != TM_NODE_LEVEL_TC ||
			n->parent_node->parent_node_id !=
				subport_node->node_id ||
			n->priority != tc_id)
			continue;

		return tm_tc_shared_shaper_get(dev, n);
	}

	return NULL;
}
static int
hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	struct tm_node_list *nl = &h->nodes;
	struct tm_shared_shaper_list *ssl = &h->shared_shapers;
	struct tm_wred_profile_list *wpl = &h->wred_profiles;
	struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
	struct tm_shared_shaper *ss;

	uint32_t n_pipes_per_subport;

	/* Root node exists. */
	if (nr == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* There is at least one subport, max is not exceeded. */
	if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* There is at least one pipe. */
	if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_LEVEL_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Number of pipes is the same for all subports. Maximum number of
	 * pipes per subport is not exceeded.
	 */
	n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

	if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	TAILQ_FOREACH(ns, nl, node) {
		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		if (ns->n_children != n_pipes_per_subport)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Each pipe has exactly 4 TCs, with exactly one TC for each priority */
	TAILQ_FOREACH(np, nl, node) {
		uint32_t mask = 0, mask_expected =
			RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
				uint32_t);

		if (np->level != TM_NODE_LEVEL_PIPE)
			continue;

		if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));

		TAILQ_FOREACH(nt, nl, node) {
			if (nt->level != TM_NODE_LEVEL_TC ||
				nt->parent_node_id != np->node_id)
				continue;

			mask |= 1 << nt->priority;
		}

		if (mask != mask_expected)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
	}
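	/*
	 * The bitmask check above: RTE_LEN2MASK(4, uint32_t) = 0xF, and each
	 * TC contributes the bit (1 << priority), so mask == 0xF exactly when
	 * the pipe has one TC per priority 0..3, with no duplicates or gaps.
	 */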
	/* Each TC has exactly 4 packet queues. */
	TAILQ_FOREACH(nt, nl, node) {
		if (nt->level != TM_NODE_LEVEL_TC)
			continue;

		if (nt->n_children != RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
	}

	/**
	 * Shared shapers:
	 *    -For each TC #i, all pipes in the same subport use the same
	 *     shared shaper (or no shared shaper) for their TC#i.
	 *    -Each shared shaper needs to have at least one user. All its
	 *     users have to be TC nodes with the same priority and the same
	 *     subport.
	 */
	TAILQ_FOREACH(ns, nl, node) {
		struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
		uint32_t id;

		if (ns->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
			s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);

		TAILQ_FOREACH(nt, nl, node) {
			struct tm_shared_shaper *subport_ss, *tc_ss;

			if (nt->level != TM_NODE_LEVEL_TC ||
				nt->parent_node->parent_node_id !=
					ns->node_id)
				continue;

			subport_ss = s[nt->priority];
			tc_ss = tm_tc_shared_shaper_get(dev, nt);

			if (subport_ss == NULL && tc_ss == NULL)
				continue;

			if ((subport_ss == NULL && tc_ss != NULL) ||
				(subport_ss != NULL && tc_ss == NULL) ||
				subport_ss->shared_shaper_id !=
					tc_ss->shared_shaper_id)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));
		}
	}

	TAILQ_FOREACH(ss, ssl, node) {
		struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
		uint32_t n_users = 0;

		if (nt_any != NULL)
			TAILQ_FOREACH(nt, nl, node) {
				if (nt->level != TM_NODE_LEVEL_TC ||
					nt->priority != nt_any->priority ||
					nt->parent_node->parent_node_id !=
						nt_any->parent_node->parent_node_id)
					continue;

				n_users++;
			}

		if (ss->n_users == 0 || ss->n_users != n_users)
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
	}

	/* Not too many pipe profiles. */
	if (pipe_profiles_generate(dev))
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	/**
	 * WRED (when used, i.e. at least one WRED profile defined):
	 *    -Each WRED profile must have at least one user.
	 *    -All leaf nodes must have their private WRED context enabled.
	 *    -For each TC #i, all leaf nodes must use the same WRED profile
	 *     for their private WRED context.
	 */
	if (h->n_wred_profiles) {
		struct tm_wred_profile *wp;
		struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
		uint32_t id;

		TAILQ_FOREACH(wp, wpl, node)
			if (wp->n_users == 0)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));

		for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
			w[id] = tm_tc_wred_profile_get(dev, id);

			if (w[id] == NULL)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));
		}

		TAILQ_FOREACH(nq, nl, node) {
			uint32_t id;

			if (nq->level != TM_NODE_LEVEL_QUEUE)
				continue;

			id = nq->parent_node->priority;

			if (nq->wred_profile == NULL ||
				nq->wred_profile->wred_profile_id !=
					w[id]->wred_profile_id)
				return -rte_tm_error_set(error,
					EINVAL,
					RTE_TM_ERROR_TYPE_UNSPECIFIED,
					NULL,
					rte_strerror(EINVAL));
		}
	}

	return 0;
}
static void
hierarchy_blueprints_create(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_params *t = &p->soft.tm.params;
	struct tm_hierarchy *h = &p->soft.tm.h;

	struct tm_node_list *nl = &h->nodes;
	struct tm_node *root = tm_root_node_present(dev), *n;

	uint32_t subport_id;

	t->port_params = (struct rte_sched_port_params) {
		.name = dev->data->name,
		.socket = dev->data->numa_node,
		.rate = root->shaper_profile->params.peak.rate,
		.mtu = dev->data->mtu,
		.frame_overhead =
			root->shaper_profile->params.pkt_length_adjust,
		.n_subports_per_port = root->n_children,
		.n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
			h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
		.qsize = {p->params.tm.qsize[0],
			p->params.tm.qsize[1],
			p->params.tm.qsize[2],
			p->params.tm.qsize[3],
		},
		.pipe_profiles = t->pipe_profiles,
		.n_pipe_profiles = t->n_pipe_profiles,
	};

	wred_profiles_set(dev);

	subport_id = 0;
	TAILQ_FOREACH(n, nl, node) {
		uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
		uint32_t i;

		if (n->level != TM_NODE_LEVEL_SUBPORT)
			continue;

		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
			struct tm_shared_shaper *ss;
			struct tm_shaper_profile *sp;

			ss = tm_subport_tc_shared_shaper_get(dev, n, i);
			sp = (ss) ? tm_shaper_profile_search(dev,
				ss->shaper_profile_id) :
				n->shaper_profile;
			tc_rate[i] = sp->params.peak.rate;
		}

		t->subport_params[subport_id] =
			(struct rte_sched_subport_params) {
				.tb_rate = n->shaper_profile->params.peak.rate,
				.tb_size = n->shaper_profile->params.peak.size,

				.tc_rate = {tc_rate[0],
					tc_rate[1],
					tc_rate[2],
					tc_rate[3],
				},
				.tc_period = SUBPORT_TC_PERIOD,
			};

		subport_id++;
	}
}
/* Traffic manager hierarchy commit */
static int
pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
	int clear_on_fail,
	struct rte_tm_error *error)
{
	struct pmd_internals *p = dev->data->dev_private;
	int status;

	/* Checks */
	if (p->soft.tm.hierarchy_frozen)
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	status = hierarchy_commit_check(dev, error);
	if (status) {
		if (clear_on_fail) {
			tm_hierarchy_uninit(p);
			tm_hierarchy_init(p);
		}

		return status;
	}

	/* Create blueprints */
	hierarchy_blueprints_create(dev);

	/* Freeze hierarchy */
	p->soft.tm.hierarchy_frozen = 1;

	return 0;
}
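/*
 * Typical rte_tm call sequence against this driver (a sketch; the IDs and
 * rates below are made up for illustration):
 *
 *	struct rte_tm_error err;
 *	struct rte_tm_shaper_params sh = {
 *		.peak = {.rate = 1250000, .size = 1000000},
 *		.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
 *	};
 *
 *	rte_tm_shaper_profile_add(port_id, 0, &sh, &err);
 *	rte_tm_node_add(port_id, root_id, RTE_TM_NODE_ID_NULL, 0, 1,
 *		RTE_TM_NODE_LEVEL_ID_ANY, &root_params, &err);
 *	// ... add subport, pipe, TC and queue nodes the same way ...
 *	rte_tm_hierarchy_commit(port_id, 1, &err);
 *
 * The commit must happen before the port is started; once the hierarchy is
 * frozen, only the run-time updates implemented below are accepted.
 */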
#ifdef RTE_SCHED_SUBPORT_TC_OV

static int
update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tc_ov_weight = (uint8_t)weight;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is already part of the existing set
	 * of pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	np->weight = weight;

	return 0;
}

#endif
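/* Editorial note: because rte_sched pipe profiles are fixed once the port
 * is configured, a runtime weight update only succeeds when a profile that
 * differs from the current one only in tc_ov_weight already exists.
 * Applications that plan to tune weights at runtime should declare all
 * needed profile variants before committing the hierarchy.
 */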
static int
update_queue_weight(struct rte_eth_dev *dev,
	struct tm_node *nq, uint32_t weight)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t queue_id = tm_node_queue_id(dev, nq);

	struct tm_node *nt = nq->parent_node;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	uint32_t pipe_queue_id =
		tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.wrr_weights[pipe_queue_id] = (uint8_t)weight;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is already part of the existing set
	 * of pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	nq->weight = weight;

	return 0;
}
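/* Worked example (assuming 4 queues per traffic class): tc_id = 2 and
 * queue_id = 3 give pipe_queue_id = 2 * 4 + 3 = 11, i.e. the update
 * rewrites WRR weight slot 11 of the candidate pipe profile.
 */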
/* Traffic manager node parent update */
static int
pmd_tm_node_parent_update(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t parent_node_id,
	uint32_t priority,
	uint32_t weight,
	struct rte_tm_error *error)
{
	struct tm_node *n;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Parent node must be the same */
	if (n->parent_node_id != parent_node_id)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Priority must be the same */
	if (n->priority != priority)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_PRIORITY,
			NULL,
			rte_strerror(EINVAL));

	/* Weight must be non-zero and small enough to fit in 8 bits
	 * (values of UINT8_MAX and above are rejected).
	 */
	if (weight == 0 || weight >= UINT8_MAX)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	case TM_NODE_LEVEL_SUBPORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	case TM_NODE_LEVEL_PIPE:
#ifdef RTE_SCHED_SUBPORT_TC_OV
		if (update_pipe_weight(dev, n, weight))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
#else
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));
#endif

	case TM_NODE_LEVEL_TC:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_WEIGHT,
			NULL,
			rte_strerror(EINVAL));

	case TM_NODE_LEVEL_QUEUE:
	default:
		if (update_queue_weight(dev, n, weight))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	}
}
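/* Usage sketch (application side, names assumed): the only parent "update"
 * this PMD accepts is an in-place weight change, e.g. for a leaf node:
 *
 *	struct rte_tm_error error;
 *
 *	if (rte_tm_node_parent_update(port_id, queue_node_id,
 *		same_parent_node_id, same_priority, 2, &error))
 *		printf("weight update failed: %s\n", error.message);
 *
 * Passing a different parent or priority is rejected with EINVAL above.
 */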
static int
update_subport_rate(struct rte_eth_dev *dev,
	struct tm_node *ns,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_subport_params subport_params;

	/* Derive new subport configuration. */
	memcpy(&subport_params,
		&p->soft.tm.params.subport_params[subport_id],
		sizeof(subport_params));
	subport_params.tb_rate = sp->params.peak.rate;
	subport_params.tb_size = sp->params.peak.size;

	/* Update the subport configuration. */
	if (rte_sched_subport_config(p->soft.tm.sched, subport_id,
		&subport_params))
		return -1;

	/* Commit changes. */
	ns->shaper_profile->n_users--;

	ns->shaper_profile = sp;
	ns->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	memcpy(&p->soft.tm.params.subport_params[subport_id],
		&subport_params,
		sizeof(subport_params));

	return 0;
}
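/* Editorial note: unlike the pipe-level updates, a subport rate change does
 * not go through pipe profile matching; rte_sched_subport_config() can be
 * re-invoked at runtime with new token bucket parameters, so any valid
 * shaper profile can be applied here.
 */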
static int
update_pipe_rate(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tb_rate = sp->params.peak.rate;
	profile1.tb_size = sp->params.peak.size;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is already part of the existing set
	 * of pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	np->shaper_profile->n_users--;
	np->shaper_profile = sp;
	np->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	return 0;
}
static int
update_tc_rate(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct tm_shaper_profile *sp)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);

	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);

	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
	struct rte_sched_pipe_params profile1;
	uint32_t pipe_profile_id;

	/* Derive new pipe profile. */
	memcpy(&profile1, profile0, sizeof(profile1));
	profile1.tc_rate[tc_id] = sp->params.peak.rate;

	/* Since the implementation does not allow adding more pipe profiles
	 * after port configuration, the pipe configuration can be successfully
	 * updated only if the new profile is already part of the existing set
	 * of pipe profiles.
	 */
	if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
		return -1;

	/* Update the pipe profile used by the current pipe. */
	if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
		(int32_t)pipe_profile_id))
		return -1;

	/* Commit changes. */
	pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
	nt->shaper_profile->n_users--;
	nt->shaper_profile = sp;
	nt->params.shaper_profile_id = sp->shaper_profile_id;
	sp->n_users++;

	return 0;
}
/* Traffic manager node shaper update */
static int
pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
	uint32_t node_id,
	uint32_t shaper_profile_id,
	struct rte_tm_error *error)
{
	struct tm_node *n;
	struct tm_shaper_profile *sp;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	/* Shaper profile must be valid. */
	sp = tm_shaper_profile_search(dev, shaper_profile_id);
	if (sp == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));

	case TM_NODE_LEVEL_SUBPORT:
		if (update_subport_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_PIPE:
		if (update_pipe_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_TC:
		if (update_tc_rate(dev, n, sp))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_QUEUE:
	default:
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EINVAL));
	}
}
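/* Usage sketch (application side, names assumed): attach a different shaper
 * profile to a subport, pipe or TC node at runtime:
 *
 *	struct rte_tm_error error;
 *
 *	if (rte_tm_node_shaper_update(port_id, pipe_node_id,
 *		new_shaper_profile_id, &error))
 *		printf("shaper update failed: %s\n", error.message);
 *
 * Port and queue nodes are rejected above: the port rate is fixed at
 * hierarchy commit time and queues have no private shaper in this model.
 */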
static inline uint32_t
tm_port_queue_id(struct rte_eth_dev *dev,
	uint32_t port_subport_id,
	uint32_t subport_pipe_id,
	uint32_t pipe_tc_id,
	uint32_t tc_queue_id)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
		h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];

	uint32_t port_pipe_id =
		port_subport_id * n_pipes_per_subport + subport_pipe_id;
	uint32_t port_tc_id =
		port_pipe_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + pipe_tc_id;
	uint32_t port_queue_id =
		port_tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_queue_id;

	return port_queue_id;
}
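/* Worked example (assuming 4 TCs, 4 queues per TC and 4096 pipes per
 * subport): subport 1, pipe 2, TC 3, queue 0 maps to
 *   port_pipe_id  = 1 * 4096 + 2 = 4098
 *   port_tc_id    = 4098 * 4 + 3 = 16395
 *   port_queue_id = 16395 * 4 + 0 = 65580
 */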
static int
read_port_stats(struct rte_eth_dev *dev,
	struct tm_node *nr,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct tm_hierarchy *h = &p->soft.tm.h;
	uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
	uint32_t subport_id;

	for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
		struct rte_sched_subport_stats s;
		uint32_t tc_ov, id;

		/* Stats read */
		int status = rte_sched_subport_read_stats(p->soft.tm.sched,
			subport_id, &s, &tc_ov);
		if (status)
			return status;

		/* Stats accumulate */
		for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
			nr->stats.n_pkts +=
				s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
			nr->stats.n_bytes +=
				s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
			nr->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
				s.n_pkts_tc_dropped[id];
			nr->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
				s.n_bytes_tc_dropped[id];
		}
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &nr->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&nr->stats, 0, sizeof(nr->stats));

	return 0;
}
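/* Editorial note: the read_*_stats() helpers all follow the same pattern:
 * read the rte_sched counters (which the scheduler resets on read),
 * accumulate them into the TM node's persistent counters, copy the result
 * to the caller, and optionally zero the node counters when "clear" is set.
 */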
static int
read_subport_stats(struct rte_eth_dev *dev,
	struct tm_node *ns,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	struct rte_sched_subport_stats s;
	uint32_t tc_ov, tc_id;

	/* Stats read */
	int status = rte_sched_subport_read_stats(p->soft.tm.sched,
		subport_id, &s, &tc_ov);
	if (status)
		return status;

	/* Stats accumulate */
	for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
		ns->stats.n_pkts +=
			s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
		ns->stats.n_bytes +=
			s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
		ns->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
			s.n_pkts_tc_dropped[tc_id];
		ns->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
			s.n_bytes_tc_dropped[tc_id];
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &ns->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&ns->stats, 0, sizeof(ns->stats));

	return 0;
}
static int
read_pipe_stats(struct rte_eth_dev *dev,
	struct tm_node *np,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);
	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	uint32_t i;

	/* Stats read */
	for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
		struct rte_sched_queue_stats s;
		uint16_t qlen;

		uint32_t qid = tm_port_queue_id(dev, subport_id, pipe_id,
			i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
			i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);

		int status = rte_sched_queue_read_stats(p->soft.tm.sched,
			qid, &s, &qlen);
		if (status)
			return status;

		/* Stats accumulate */
		np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
		np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
		np->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
		np->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
			s.n_bytes_dropped;
		np->stats.leaf.n_pkts_queued = qlen;
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &np->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&np->stats, 0, sizeof(np->stats));

	return 0;
}
static int
read_tc_stats(struct rte_eth_dev *dev,
	struct tm_node *nt,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	uint32_t tc_id = tm_node_tc_id(dev, nt);
	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);
	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);
	uint32_t i;

	/* Stats read */
	for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
		struct rte_sched_queue_stats s;
		uint16_t qlen;

		uint32_t qid = tm_port_queue_id(dev, subport_id, pipe_id,
			tc_id, i);

		int status = rte_sched_queue_read_stats(p->soft.tm.sched,
			qid, &s, &qlen);
		if (status)
			return status;

		/* Stats accumulate */
		nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
		nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
		nt->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
		nt->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
			s.n_bytes_dropped;
		nt->stats.leaf.n_pkts_queued = qlen;
	}

	/* Stats copy */
	if (stats)
		memcpy(stats, &nt->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_DEFAULT;

	/* Stats clear */
	if (clear)
		memset(&nt->stats, 0, sizeof(nt->stats));

	return 0;
}
static int
read_queue_stats(struct rte_eth_dev *dev,
	struct tm_node *nq,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct rte_sched_queue_stats s;
	uint16_t qlen;

	uint32_t queue_id = tm_node_queue_id(dev, nq);
	struct tm_node *nt = nq->parent_node;
	uint32_t tc_id = tm_node_tc_id(dev, nt);
	struct tm_node *np = nt->parent_node;
	uint32_t pipe_id = tm_node_pipe_id(dev, np);
	struct tm_node *ns = np->parent_node;
	uint32_t subport_id = tm_node_subport_id(dev, ns);

	/* Stats read */
	uint32_t qid = tm_port_queue_id(dev, subport_id, pipe_id,
		tc_id, queue_id);

	int status = rte_sched_queue_read_stats(p->soft.tm.sched,
		qid, &s, &qlen);
	if (status)
		return status;

	/* Stats accumulate */
	nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
	nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
	nq->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
	nq->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
		s.n_bytes_dropped;
	nq->stats.leaf.n_pkts_queued = qlen;

	/* Stats copy */
	if (stats)
		memcpy(stats, &nq->stats, sizeof(*stats));

	if (stats_mask)
		*stats_mask = STATS_MASK_QUEUE;

	/* Stats clear */
	if (clear)
		memset(&nq->stats, 0, sizeof(nq->stats));

	return 0;
}
/* Traffic manager read stats counters for specific node */
static int
pmd_tm_node_stats_read(struct rte_eth_dev *dev,
	uint32_t node_id,
	struct rte_tm_node_stats *stats,
	uint64_t *stats_mask,
	int clear,
	struct rte_tm_error *error)
{
	struct tm_node *n;

	/* Port must be started and TM used. */
	if (dev->data->dev_started == 0 && (tm_used(dev) == 0))
		return -rte_tm_error_set(error,
			EBUSY,
			RTE_TM_ERROR_TYPE_UNSPECIFIED,
			NULL,
			rte_strerror(EBUSY));

	/* Node must be valid */
	n = tm_node_search(dev, node_id);
	if (n == NULL)
		return -rte_tm_error_set(error,
			EINVAL,
			RTE_TM_ERROR_TYPE_NODE_ID,
			NULL,
			rte_strerror(EINVAL));

	switch (n->level) {
	case TM_NODE_LEVEL_PORT:
		if (read_port_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_SUBPORT:
		if (read_subport_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_PIPE:
		if (read_pipe_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_TC:
		if (read_tc_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;

	case TM_NODE_LEVEL_QUEUE:
	default:
		if (read_queue_stats(dev, n, stats, stats_mask, clear))
			return -rte_tm_error_set(error,
				EINVAL,
				RTE_TM_ERROR_TYPE_UNSPECIFIED,
				NULL,
				rte_strerror(EINVAL));
		return 0;
	}
}
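/* Usage sketch (application side, names assumed): read and clear the
 * counters of a node:
 *
 *	struct rte_tm_node_stats stats;
 *	uint64_t stats_mask;
 *	struct rte_tm_error error;
 *
 *	if (rte_tm_node_stats_read(port_id, node_id, &stats, &stats_mask,
 *		1, &error))
 *		printf("stats read failed: %s\n", error.message);
 *
 * stats_mask reports which fields are valid: STATS_MASK_DEFAULT for
 * non-leaf nodes, STATS_MASK_QUEUE (which adds the queued-packets gauge)
 * for leaf nodes.
 */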
const struct rte_tm_ops pmd_tm_ops = {
	.node_type_get = pmd_tm_node_type_get,
	.capabilities_get = pmd_tm_capabilities_get,
	.level_capabilities_get = pmd_tm_level_capabilities_get,
	.node_capabilities_get = pmd_tm_node_capabilities_get,

	.wred_profile_add = pmd_tm_wred_profile_add,
	.wred_profile_delete = pmd_tm_wred_profile_delete,
	.shared_wred_context_add_update = NULL,
	.shared_wred_context_delete = NULL,

	.shaper_profile_add = pmd_tm_shaper_profile_add,
	.shaper_profile_delete = pmd_tm_shaper_profile_delete,
	.shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
	.shared_shaper_delete = pmd_tm_shared_shaper_delete,

	.node_add = pmd_tm_node_add,
	.node_delete = pmd_tm_node_delete,
	.node_suspend = NULL,
	.node_resume = NULL,
	.hierarchy_commit = pmd_tm_hierarchy_commit,

	.node_parent_update = pmd_tm_node_parent_update,
	.node_shaper_update = pmd_tm_node_shaper_update,
	.node_shared_shaper_update = NULL,
	.node_stats_update = NULL,
	.node_wfq_weight_mode_update = NULL,
	.node_cman_update = NULL,
	.node_wred_context_update = NULL,
	.node_shared_wred_context_update = NULL,

	.node_stats_read = pmd_tm_node_stats_read,
};
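/* Editorial note: this ops table is returned by the softnic ethdev's
 * tm_ops_get() callback, so all pmd_tm_*() handlers above are reached
 * through the generic rte_tm_*() API using the softnic port_id.
 */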