-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#include <stdint.h>
#include <string.h>
#include <rte_malloc.h>
+#include <rte_string_fns.h>
#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"
-#define BYTES_IN_MBPS (1000 * 1000 / 8)
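+/* Subport/pipe traffic class rate enforcement periods (rte_sched tc_period,
+ * in milliseconds): a shorter period tracks the configured TC rates more
+ * tightly at the cost of more frequent credit updates.
+ */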
#define SUBPORT_TC_PERIOD 10
#define PIPE_TC_PERIOD 40
int
-tm_params_check(struct pmd_params *params, uint32_t hard_rate)
+softnic_tmgr_init(struct pmd_internals *p)
{
- uint64_t hard_rate_bytes_per_sec = (uint64_t)hard_rate * BYTES_IN_MBPS;
- uint32_t i;
+ TAILQ_INIT(&p->tmgr_port_list);
- /* rate */
- if (params->soft.tm.rate) {
- if (params->soft.tm.rate > hard_rate_bytes_per_sec)
- return -EINVAL;
- } else {
- params->soft.tm.rate =
- (hard_rate_bytes_per_sec > UINT32_MAX) ?
- UINT32_MAX : hard_rate_bytes_per_sec;
+ return 0;
+}
+
+void
+softnic_tmgr_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_tmgr_port *tmgr_port;
+
+ tmgr_port = TAILQ_FIRST(&p->tmgr_port_list);
+ if (tmgr_port == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
+ rte_sched_port_free(tmgr_port->s);
+ free(tmgr_port);
}
+}
- /* nb_queues */
- if (params->soft.tm.nb_queues == 0)
- return -EINVAL;
+struct softnic_tmgr_port *
+softnic_tmgr_port_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_tmgr_port *tmgr_port;
- if (params->soft.tm.nb_queues < RTE_SCHED_QUEUES_PER_PIPE)
- params->soft.tm.nb_queues = RTE_SCHED_QUEUES_PER_PIPE;
+ if (name == NULL)
+ return NULL;
- params->soft.tm.nb_queues =
- rte_align32pow2(params->soft.tm.nb_queues);
+ TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node)
+ if (strcmp(tmgr_port->name, name) == 0)
+ return tmgr_port;
- /* qsize */
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
- if (params->soft.tm.qsize[i] == 0)
- return -EINVAL;
+ return NULL;
+}
+
+struct softnic_tmgr_port *
+softnic_tmgr_port_create(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_tmgr_port *tmgr_port;
+ struct tm_params *t = &p->soft.tm.params;
+ struct rte_sched_port *sched;
+ uint32_t n_subports, subport_id;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_tmgr_port_find(p, name))
+ return NULL;
+
+ /*
+ * Resource creation
+ */
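+ /* The scheduler can only be instantiated once the rte_tm hierarchy has
+ * been committed (frozen), since the port, subport and pipe parameters
+ * used below are all derived from that hierarchy.
+ */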
+
+ /* Is hierarchy frozen? */
+ if (p->soft.tm.hierarchy_frozen == 0)
+ return NULL;
+
+ /* Port */
+ sched = rte_sched_port_config(&t->port_params);
+ if (sched == NULL)
+ return NULL;
+
+ /* Subport */
+ n_subports = t->port_params.n_subports_per_port;
+ for (subport_id = 0; subport_id < n_subports; subport_id++) {
+ uint32_t n_pipes_per_subport =
+ t->subport_params[subport_id].n_pipes_per_subport_enabled;
+ uint32_t pipe_id;
+ int status;
+
+ status = rte_sched_subport_config(sched,
+ subport_id,
+ &t->subport_params[subport_id],
+ t->subport_to_profile[subport_id]);
+ if (status) {
+ rte_sched_port_free(sched);
+ return NULL;
+ }
+
+ /* Pipe */
+ for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
+ int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id;
+ int profile_id = t->pipe_to_profile[pos];
+
+ if (profile_id < 0)
+ continue;
- params->soft.tm.qsize[i] =
- rte_align32pow2(params->soft.tm.qsize[i]);
+ status = rte_sched_pipe_config(sched,
+ subport_id,
+ pipe_id,
+ profile_id);
+ if (status) {
+ rte_sched_port_free(sched);
+ return NULL;
+ }
+ }
}
- /* enq_bsz, deq_bsz */
- if (params->soft.tm.enq_bsz == 0 ||
- params->soft.tm.deq_bsz == 0 ||
- params->soft.tm.deq_bsz >= params->soft.tm.enq_bsz)
- return -EINVAL;
+ /* Node allocation */
+ tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port));
+ if (tmgr_port == NULL) {
+ rte_sched_port_free(sched);
+ return NULL;
+ }
- return 0;
+ /* Node fill in */
+ strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
+ tmgr_port->s = sched;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node);
+
+ return tmgr_port;
}
-static void
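+/* Helper: look up the scheduler instance of the tmgr port named "TMGR",
+ * which is the port all TM ops below operate on; returns NULL if that
+ * port has not been created yet.
+ */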
+static struct rte_sched_port *
+SCHED(struct pmd_internals *p)
+{
+ struct softnic_tmgr_port *tmgr_port;
+
+ tmgr_port = softnic_tmgr_port_find(p, "TMGR");
+ if (tmgr_port == NULL)
+ return NULL;
+
+ return tmgr_port->s;
+}
+
+void
tm_hierarchy_init(struct pmd_internals *p)
{
- memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
+ memset(&p->soft.tm, 0, sizeof(p->soft.tm));
/* Initialize shaper profile list */
TAILQ_INIT(&p->soft.tm.h.shaper_profiles);
TAILQ_INIT(&p->soft.tm.h.nodes);
}
-static void
-tm_hierarchy_uninit(struct pmd_internals *p)
+void
+tm_hierarchy_free(struct pmd_internals *p)
{
/* Remove all nodes */
for ( ; ; ) {
free(shaper_profile);
}
- memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
-}
-
-int
-tm_init(struct pmd_internals *p,
- struct pmd_params *params,
- int numa_node)
-{
- uint32_t enq_bsz = params->soft.tm.enq_bsz;
- uint32_t deq_bsz = params->soft.tm.deq_bsz;
-
- p->soft.tm.pkts_enq = rte_zmalloc_socket(params->soft.name,
- 2 * enq_bsz * sizeof(struct rte_mbuf *),
- 0,
- numa_node);
-
- if (p->soft.tm.pkts_enq == NULL)
- return -ENOMEM;
-
- p->soft.tm.pkts_deq = rte_zmalloc_socket(params->soft.name,
- deq_bsz * sizeof(struct rte_mbuf *),
- 0,
- numa_node);
-
- if (p->soft.tm.pkts_deq == NULL) {
- rte_free(p->soft.tm.pkts_enq);
- return -ENOMEM;
- }
-
tm_hierarchy_init(p);
-
- return 0;
-}
-
-void
-tm_free(struct pmd_internals *p)
-{
- tm_hierarchy_uninit(p);
- rte_free(p->soft.tm.pkts_enq);
- rte_free(p->soft.tm.pkts_deq);
-}
-
-int
-tm_start(struct pmd_internals *p)
-{
- struct tm_params *t = &p->soft.tm.params;
- uint32_t n_subports, subport_id;
- int status;
-
- /* Is hierarchy frozen? */
- if (p->soft.tm.hierarchy_frozen == 0)
- return -1;
-
- /* Port */
- p->soft.tm.sched = rte_sched_port_config(&t->port_params);
- if (p->soft.tm.sched == NULL)
- return -1;
-
- /* Subport */
- n_subports = t->port_params.n_subports_per_port;
- for (subport_id = 0; subport_id < n_subports; subport_id++) {
- uint32_t n_pipes_per_subport =
- t->port_params.n_pipes_per_subport;
- uint32_t pipe_id;
-
- status = rte_sched_subport_config(p->soft.tm.sched,
- subport_id,
- &t->subport_params[subport_id]);
- if (status) {
- rte_sched_port_free(p->soft.tm.sched);
- return -1;
- }
-
- /* Pipe */
- n_pipes_per_subport = t->port_params.n_pipes_per_subport;
- for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
- int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
- pipe_id;
- int profile_id = t->pipe_to_profile[pos];
-
- if (profile_id < 0)
- continue;
-
- status = rte_sched_pipe_config(p->soft.tm.sched,
- subport_id,
- pipe_id,
- profile_id);
- if (status) {
- rte_sched_port_free(p->soft.tm.sched);
- return -1;
- }
- }
- }
-
- return 0;
-}
-
-void
-tm_stop(struct pmd_internals *p)
-{
- if (p->soft.tm.sched)
- rte_sched_port_free(p->soft.tm.sched);
-
- /* Unfreeze hierarchy */
- p->soft.tm.hierarchy_frozen = 0;
}
static struct tm_shaper_profile *
tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
{
struct pmd_internals *p = dev->data->dev_private;
- uint32_t n_queues_max = p->params.soft.tm.nb_queues;
- uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
+ uint32_t n_queues_max = p->params.tm.n_queues;
+ uint32_t n_tc_max =
+ (n_queues_max * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ / RTE_SCHED_QUEUES_PER_PIPE;
uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
uint32_t n_subports_max = n_pipes_max;
uint32_t n_root_max = 1;
NULL,
rte_strerror(EINVAL));
- *is_leaf = node_id < p->params.soft.tm.nb_queues;
+ *is_leaf = node_id < p->params.tm.n_queues;
return 0;
}
-#ifdef RTE_SCHED_RED
+#ifdef RTE_SCHED_CMAN
#define WRED_SUPPORTED 1
#else
#define WRED_SUPPORTED 0
.shaper_private_dual_rate_n_max = 0,
.shaper_private_rate_min = 1,
.shaper_private_rate_max = UINT32_MAX,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 1,
.shaper_shared_n_max = UINT32_MAX,
.shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
.shaper_shared_dual_rate_n_max = 0,
.shaper_shared_rate_min = 1,
.shaper_shared_rate_max = UINT32_MAX,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 1,
.shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
.shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
.sched_wfq_n_children_per_group_max = UINT32_MAX,
.sched_wfq_n_groups_max = 1,
.sched_wfq_weight_max = UINT32_MAX,
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 1,
+ .cman_wred_packet_mode_supported = WRED_SUPPORTED,
+ .cman_wred_byte_mode_supported = 0,
.cman_head_drop_supported = 0,
.cman_wred_context_n_max = 0,
.cman_wred_context_private_n_max = 0,
.non_leaf_nodes_identical = 1,
.leaf_nodes_identical = 0,
- .nonleaf = {
+ {.nonleaf = {
.shaper_private_supported = 1,
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
.shaper_private_rate_max = UINT32_MAX,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 1,
.shaper_shared_n_max = 0,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 0,
.sched_n_children_max = UINT32_MAX,
.sched_sp_n_priorities_max = 1,
.sched_wfq_n_children_per_group_max = UINT32_MAX,
.sched_wfq_n_groups_max = 1,
.sched_wfq_weight_max = 1,
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 1,
.stats_mask = STATS_MASK_DEFAULT,
- },
+ } },
},
[TM_NODE_LEVEL_SUBPORT] = {
.non_leaf_nodes_identical = 1,
.leaf_nodes_identical = 0,
- .nonleaf = {
+ {.nonleaf = {
.shaper_private_supported = 1,
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
.shaper_private_rate_max = UINT32_MAX,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 1,
.shaper_shared_n_max = 0,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 0,
.sched_n_children_max = UINT32_MAX,
.sched_sp_n_priorities_max = 1,
.sched_wfq_n_groups_max = 1,
#ifdef RTE_SCHED_SUBPORT_TC_OV
.sched_wfq_weight_max = UINT32_MAX,
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 1,
#else
.sched_wfq_weight_max = 1,
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 1,
#endif
.stats_mask = STATS_MASK_DEFAULT,
- },
+ } },
},
[TM_NODE_LEVEL_PIPE] = {
.non_leaf_nodes_identical = 1,
.leaf_nodes_identical = 0,
- .nonleaf = {
+ {.nonleaf = {
.shaper_private_supported = 1,
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
.shaper_private_rate_max = UINT32_MAX,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 1,
.shaper_shared_n_max = 0,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 0,
.sched_n_children_max =
RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
.sched_wfq_n_children_per_group_max = 1,
.sched_wfq_n_groups_max = 0,
.sched_wfq_weight_max = 1,
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 0,
.stats_mask = STATS_MASK_DEFAULT,
- },
+ } },
},
[TM_NODE_LEVEL_TC] = {
.non_leaf_nodes_identical = 1,
.leaf_nodes_identical = 0,
- .nonleaf = {
+ {.nonleaf = {
.shaper_private_supported = 1,
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
.shaper_private_rate_max = UINT32_MAX,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 1,
.shaper_shared_n_max = 1,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 1,
.sched_n_children_max =
- RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ RTE_SCHED_BE_QUEUES_PER_PIPE,
.sched_sp_n_priorities_max = 1,
.sched_wfq_n_children_per_group_max =
- RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ RTE_SCHED_BE_QUEUES_PER_PIPE,
.sched_wfq_n_groups_max = 1,
.sched_wfq_weight_max = UINT32_MAX,
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 1,
.stats_mask = STATS_MASK_DEFAULT,
- },
+ } },
},
[TM_NODE_LEVEL_QUEUE] = {
.non_leaf_nodes_identical = 0,
.leaf_nodes_identical = 1,
- .leaf = {
+ {.leaf = {
.shaper_private_supported = 0,
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 0,
.shaper_private_rate_max = 0,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 0,
.shaper_shared_n_max = 0,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 0,
.cman_head_drop_supported = 0,
+ .cman_wred_packet_mode_supported = WRED_SUPPORTED,
+ .cman_wred_byte_mode_supported = 0,
.cman_wred_context_private_supported = WRED_SUPPORTED,
.cman_wred_context_shared_n_max = 0,
.stats_mask = STATS_MASK_QUEUE,
- },
+ } },
},
};
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
.shaper_private_rate_max = UINT32_MAX,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 1,
.shaper_shared_n_max = 0,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 0,
- .nonleaf = {
+ {.nonleaf = {
.sched_n_children_max = UINT32_MAX,
.sched_sp_n_priorities_max = 1,
.sched_wfq_n_children_per_group_max = UINT32_MAX,
.sched_wfq_n_groups_max = 1,
.sched_wfq_weight_max = 1,
- },
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 1,
+ } },
.stats_mask = STATS_MASK_DEFAULT,
},
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
.shaper_private_rate_max = UINT32_MAX,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 1,
.shaper_shared_n_max = 0,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 0,
- .nonleaf = {
+ {.nonleaf = {
.sched_n_children_max = UINT32_MAX,
.sched_sp_n_priorities_max = 1,
.sched_wfq_n_children_per_group_max = UINT32_MAX,
.sched_wfq_n_groups_max = 1,
.sched_wfq_weight_max = UINT32_MAX,
- },
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 1,
+ } },
.stats_mask = STATS_MASK_DEFAULT,
},
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
.shaper_private_rate_max = UINT32_MAX,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 1,
.shaper_shared_n_max = 0,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 0,
- .nonleaf = {
+ {.nonleaf = {
.sched_n_children_max =
RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
.sched_sp_n_priorities_max =
.sched_wfq_n_children_per_group_max = 1,
.sched_wfq_n_groups_max = 0,
.sched_wfq_weight_max = 1,
- },
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 0,
+ } },
.stats_mask = STATS_MASK_DEFAULT,
},
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
.shaper_private_rate_max = UINT32_MAX,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 1,
.shaper_shared_n_max = 1,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 1,
- .nonleaf = {
+ {.nonleaf = {
.sched_n_children_max =
- RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ RTE_SCHED_BE_QUEUES_PER_PIPE,
.sched_sp_n_priorities_max = 1,
.sched_wfq_n_children_per_group_max =
- RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ RTE_SCHED_BE_QUEUES_PER_PIPE,
.sched_wfq_n_groups_max = 1,
.sched_wfq_weight_max = UINT32_MAX,
- },
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 1,
+ } },
.stats_mask = STATS_MASK_DEFAULT,
},
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 0,
.shaper_private_rate_max = 0,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 0,
.shaper_shared_n_max = 0,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 0,
- .leaf = {
+ {.leaf = {
.cman_head_drop_supported = 0,
+ .cman_wred_packet_mode_supported = WRED_SUPPORTED,
+ .cman_wred_byte_mode_supported = 0,
.cman_wred_context_private_supported = WRED_SUPPORTED,
.cman_wred_context_shared_n_max = 0,
- },
+ } },
.stats_mask = STATS_MASK_QUEUE,
},
NULL,
rte_strerror(EINVAL));
+ /* Packet mode is not supported. */
+ if (profile->packet_mode != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE,
+ NULL,
+ rte_strerror(EINVAL));
return 0;
}
return NULL;
}
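+/* Scan the subport profiles generated so far for one identical to *sp;
+ * when found, optionally return its index through subport_profile_id.
+ */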
+static int
+subport_profile_exists(struct rte_eth_dev *dev,
+ struct rte_sched_subport_profile_params *sp,
+ uint32_t *subport_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_params *t = &p->soft.tm.params;
+ uint32_t i;
+
+ for (i = 0; i < t->n_subport_profiles; i++)
+ if (memcmp(&t->subport_profile[i], sp, sizeof(*sp)) == 0) {
+ if (subport_profile_id)
+ *subport_profile_id = i;
+ return 1;
+ }
+
+ return 0;
+}
+
static int
update_subport_tc_rate(struct rte_eth_dev *dev,
struct tm_node *nt,
struct tm_shared_shaper *ss,
struct tm_shaper_profile *sp_new)
{
+ struct rte_sched_subport_profile_params subport_profile;
struct pmd_internals *p = dev->data->dev_private;
uint32_t tc_id = tm_node_tc_id(dev, nt);
-
struct tm_node *np = nt->parent_node;
-
struct tm_node *ns = np->parent_node;
uint32_t subport_id = tm_node_subport_id(dev, ns);
-
- struct rte_sched_subport_params subport_params;
-
+ struct tm_params *t = &p->soft.tm.params;
+ uint32_t subport_profile_id;
struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
ss->shaper_profile_id);
+ if (subport_id >= TM_MAX_SUBPORT_PROFILE)
+ return -1;
+
+ subport_profile_id = t->subport_to_profile[subport_id];
+
/* Derive new subport configuration. */
- memcpy(&subport_params,
- &p->soft.tm.params.subport_params[subport_id],
- sizeof(subport_params));
- subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;
+ memcpy(&subport_profile,
+ &p->soft.tm.params.subport_profile[subport_profile_id],
+ sizeof(subport_profile));
+ subport_profile.tc_rate[tc_id] = sp_new->params.peak.rate;
/* Update the subport configuration. */
- if (rte_sched_subport_config(p->soft.tm.sched,
- subport_id, &subport_params))
+ if (rte_sched_subport_config(SCHED(p),
+ subport_id, NULL, subport_profile_id))
return -1;
/* Commit changes. */
ss->shaper_profile_id = sp_new->shaper_profile_id;
sp_new->n_users++;
- memcpy(&p->soft.tm.params.subport_params[subport_id],
- &subport_params,
- sizeof(subport_params));
+ memcpy(&p->soft.tm.params.subport_profile[subport_profile_id],
+ &subport_profile,
+ sizeof(subport_profile));
return 0;
}
struct rte_tm_error *error)
{
struct tm_wred_profile *wp;
- enum rte_tm_color color;
+ enum rte_color color;
/* WRED profile ID must not be NONE. */
if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
NULL,
rte_strerror(EINVAL));
- /* min_th <= max_th, max_th > 0 */
- for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
- uint16_t min_th = profile->red_params[color].min_th;
- uint16_t max_th = profile->red_params[color].max_th;
+ /* WRED profile should be in packet mode */
+ if (profile->packet_mode == 0)
+ return -rte_tm_error_set(error,
+ ENOTSUP,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ NULL,
+ rte_strerror(ENOTSUP));
- if (min_th > max_th || max_th == 0)
+ /* min_th <= max_th, max_th > 0 */
+ for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
+ uint32_t min_th = profile->red_params[color].min_th;
+ uint32_t max_th = profile->red_params[color].max_th;
+
+ if (min_th > max_th ||
+ max_th == 0 ||
+ min_th > UINT16_MAX ||
+ max_th > UINT16_MAX)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_WRED_PROFILE,
params->shaper_profile_id);
/* node type: non-leaf */
- if (node_id < p->params.soft.tm.nb_queues)
+ if (node_id < p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
NULL,
rte_strerror(EINVAL));
- /* Shaper must be valid.
- * Shaper profile peak rate must fit the configured port rate.
- */
+ /* Shaper must be valid */
if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
- sp == NULL ||
- sp->params.peak.rate > p->params.soft.tm.rate)
+ sp == NULL)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
struct pmd_internals *p = dev->data->dev_private;
/* node type: non-leaf */
- if (node_id < p->params.soft.tm.nb_queues)
+ if (node_id < p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
struct pmd_internals *p = dev->data->dev_private;
/* node type: non-leaf */
- if (node_id < p->params.soft.tm.nb_queues)
+ if (node_id < p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
struct pmd_internals *p = dev->data->dev_private;
/* node type: non-leaf */
- if (node_id < p->params.soft.tm.nb_queues)
+ if (node_id < p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
struct pmd_internals *p = dev->data->dev_private;
/* node type: leaf */
- if (node_id >= p->params.soft.tm.nb_queues)
+ if (node_id >= p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
/* Traffic Class (TC) */
pp->tc_period = PIPE_TC_PERIOD;
-#ifdef RTE_SCHED_SUBPORT_TC_OV
pp->tc_ov_weight = np->weight;
-#endif
TAILQ_FOREACH(nt, nl, node) {
uint32_t queue_id = 0;
/* Queue */
TAILQ_FOREACH(nq, nl, node) {
- uint32_t pipe_queue_id;
if (nq->level != TM_NODE_LEVEL_QUEUE ||
nq->parent_node_id != nt->node_id)
continue;
- pipe_queue_id = nt->priority *
- RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
- pp->wrr_weights[pipe_queue_id] = nq->weight;
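+ /* Only the best-effort TC queues are WRR-scheduled in the current
+ * sched API, so weights are recorded for those queues only.
+ */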
+ if (nt->priority == RTE_SCHED_TRAFFIC_CLASS_BE)
+ pp->wrr_weights[queue_id] = nq->weight;
queue_id++;
}
struct pmd_internals *p = dev->data->dev_private;
struct tm_params *t = &p->soft.tm.params;
- if (t->n_pipe_profiles < RTE_SCHED_PIPE_PROFILES_PER_PORT) {
+ if (t->n_pipe_profiles < TM_MAX_PIPE_PROFILE) {
*pipe_profile_id = t->n_pipe_profiles;
return 1;
}
struct rte_sched_pipe_params pp;
uint32_t pos;
+ memset(&pp, 0, sizeof(pp));
+
if (np->level != TM_NODE_LEVEL_PIPE ||
np->parent_node_id != ns->node_id)
continue;
return NULL;
}
-#ifdef RTE_SCHED_RED
+#ifdef RTE_SCHED_CMAN
static void
-wred_profiles_set(struct rte_eth_dev *dev)
+wred_profiles_set(struct rte_eth_dev *dev, uint32_t subport_id)
{
struct pmd_internals *p = dev->data->dev_private;
- struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
+ struct rte_sched_subport_params *pp =
+ &p->soft.tm.params.subport_params[subport_id];
+
uint32_t tc_id;
- enum rte_tm_color color;
+ enum rte_color color;
for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
- for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
+ for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
struct rte_red_params *dst =
- &pp->red_params[tc_id][color];
+ &pp->cman_params->red_params[tc_id][color];
struct tm_wred_profile *src_wp =
tm_tc_wred_profile_get(dev, tc_id);
struct rte_tm_red_params *src =
#else
-#define wred_profiles_set(dev)
+#define wred_profiles_set(dev, subport_id)
#endif
return NULL;
}
+static struct rte_sched_subport_profile_params *
+subport_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_params *t = &p->soft.tm.params;
+ uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
+
+ if (subport_id >= TM_MAX_SUBPORT_PROFILE)
+ return NULL;
+
+ return &t->subport_profile[subport_id];
+}
+
+static void
+subport_profile_mark(struct rte_eth_dev *dev,
+ uint32_t subport_id,
+ uint32_t subport_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_params *t = &p->soft.tm.params;
+
+ t->subport_to_profile[subport_id] = subport_profile_id;
+}
+
+static void
+subport_profile_install(struct rte_eth_dev *dev,
+ struct rte_sched_subport_profile_params *sp,
+ uint32_t subport_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_params *t = &p->soft.tm.params;
+
+ memcpy(&t->subport_profile[subport_profile_id],
+ sp, sizeof(*sp));
+ t->n_subport_profiles++;
+}
+
+static int
+subport_profile_free_exists(struct rte_eth_dev *dev,
+ uint32_t *subport_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_params *t = &p->soft.tm.params;
+
+ if (t->n_subport_profiles < TM_MAX_SUBPORT_PROFILE) {
+ *subport_profile_id = t->n_subport_profiles;
+ return 1;
+ }
+
+ return 0;
+}
+
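+/* Build a subport profile from a subport node: token bucket rate/size come
+ * from the node's private shaper, per-TC rates from the TC shared shapers,
+ * falling back to the private shaper rate when a TC has no shared shaper.
+ */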
+static void
+subport_profile_build(struct rte_eth_dev *dev, struct tm_node *np,
+ struct rte_sched_subport_profile_params *sp)
+{
+ uint32_t i;
+ memset(sp, 0, sizeof(*sp));
+
+ sp->tb_rate = np->shaper_profile->params.peak.rate;
+ sp->tb_size = np->shaper_profile->params.peak.size;
+
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ struct tm_shared_shaper *ss;
+ struct tm_shaper_profile *ssp;
+
+ ss = tm_subport_tc_shared_shaper_get(dev, np, i);
+ ssp = (ss) ? tm_shaper_profile_search(dev,
+ ss->shaper_profile_id) :
+ np->shaper_profile;
+ sp->tc_rate[i] = ssp->params.peak.rate;
+ }
+
+ /* Traffic Class (TC) */
+ sp->tc_period = SUBPORT_TC_PERIOD;
+}
+
+static int
+subport_profiles_generate(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_node_list *nl = &h->nodes;
+ struct tm_node *ns;
+ uint32_t subport_id;
+
+ /* Objective: Fill in the following fields in struct tm_params:
+ * - subport_profile
+ * - n_subport_profiles
+ * - subport_to_profile
+ */
+
+ subport_id = 0;
+ TAILQ_FOREACH(ns, nl, node) {
+ if (ns->level != TM_NODE_LEVEL_SUBPORT)
+ continue;
+
+ struct rte_sched_subport_profile_params sp;
+ uint32_t pos;
+
+ memset(&sp, 0, sizeof(sp));
+
+ subport_profile_build(dev, ns, &sp);
+
+ if (!subport_profile_exists(dev, &sp, &pos)) {
+ if (!subport_profile_free_exists(dev, &pos))
+ return -1;
+
+ subport_profile_install(dev, &sp, pos);
+ }
+
+ subport_profile_mark(dev, subport_id, pos);
+
+ subport_id++;
+ }
+
+ return 0;
+}
+
static int
hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
{
rte_strerror(EINVAL));
}
- /* Each pipe has exactly 4 TCs, with exactly one TC for each priority */
+ /* Each pipe has exactly 13 TCs, with exactly one TC for each priority */
TAILQ_FOREACH(np, nl, node) {
uint32_t mask = 0, mask_expected =
RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
rte_strerror(EINVAL));
}
- /* Each TC has exactly 4 packet queues. */
+ /* Each strict priority TC has exactly one packet queue, while the
+ * lowest priority TC (best-effort) has 4 queues.
+ */
TAILQ_FOREACH(nt, nl, node) {
if (nt->level != TM_NODE_LEVEL_TC)
continue;
- if (nt->n_children != RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
+ if (nt->n_children != 1 && nt->n_children != RTE_SCHED_BE_QUEUES_PER_PIPE)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED,
rte_strerror(EINVAL));
}
+ /* Not too many subport profiles. */
+ if (subport_profiles_generate(dev))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
/* Not too many pipe profiles. */
if (pipe_profiles_generate(dev))
return -rte_tm_error_set(error,
.frame_overhead =
root->shaper_profile->params.pkt_length_adjust,
.n_subports_per_port = root->n_children,
- .n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
- h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
- .qsize = {p->params.soft.tm.qsize[0],
- p->params.soft.tm.qsize[1],
- p->params.soft.tm.qsize[2],
- p->params.soft.tm.qsize[3],
- },
- .pipe_profiles = t->pipe_profiles,
- .n_pipe_profiles = t->n_pipe_profiles,
+ .n_subport_profiles = t->n_subport_profiles,
+ .subport_profiles = t->subport_profile,
+ .n_max_subport_profiles = TM_MAX_SUBPORT_PROFILE,
+ .n_pipes_per_subport = TM_MAX_PIPES_PER_SUBPORT,
};
- wred_profiles_set(dev);
-
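+ /* Queue sizes, pipe profiles and WRED contexts are configured per
+ * subport in the current sched API, so they are filled in below rather
+ * than at port level.
+ */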
subport_id = 0;
TAILQ_FOREACH(n, nl, node) {
- uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
- uint32_t i;
if (n->level != TM_NODE_LEVEL_SUBPORT)
continue;
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
- struct tm_shared_shaper *ss;
- struct tm_shaper_profile *sp;
-
- ss = tm_subport_tc_shared_shaper_get(dev, n, i);
- sp = (ss) ? tm_shaper_profile_search(dev,
- ss->shaper_profile_id) :
- n->shaper_profile;
- tc_rate[i] = sp->params.peak.rate;
- }
-
t->subport_params[subport_id] =
(struct rte_sched_subport_params) {
- .tb_rate = n->shaper_profile->params.peak.rate,
- .tb_size = n->shaper_profile->params.peak.size,
-
- .tc_rate = {tc_rate[0],
- tc_rate[1],
- tc_rate[2],
- tc_rate[3],
- },
- .tc_period = SUBPORT_TC_PERIOD,
+ .n_pipes_per_subport_enabled =
+ h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
+ h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
+ .qsize = {p->params.tm.qsize[0],
+ p->params.tm.qsize[1],
+ p->params.tm.qsize[2],
+ p->params.tm.qsize[3],
+ p->params.tm.qsize[4],
+ p->params.tm.qsize[5],
+ p->params.tm.qsize[6],
+ p->params.tm.qsize[7],
+ p->params.tm.qsize[8],
+ p->params.tm.qsize[9],
+ p->params.tm.qsize[10],
+ p->params.tm.qsize[11],
+ p->params.tm.qsize[12],
+ },
+ .pipe_profiles = t->pipe_profiles,
+ .n_pipe_profiles = t->n_pipe_profiles,
+ .n_max_pipe_profiles = TM_MAX_PIPE_PROFILE,
};
-
+ wred_profiles_set(dev, subport_id);
subport_id++;
}
}
status = hierarchy_commit_check(dev, error);
if (status) {
- if (clear_on_fail) {
- tm_hierarchy_uninit(p);
- tm_hierarchy_init(p);
- }
+ if (clear_on_fail)
+ tm_hierarchy_free(p);
return status;
}
return -1;
/* Update the pipe profile used by the current pipe. */
- if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
(int32_t)pipe_profile_id))
return -1;
uint32_t queue_id = tm_node_queue_id(dev, nq);
struct tm_node *nt = nq->parent_node;
- uint32_t tc_id = tm_node_tc_id(dev, nt);
struct tm_node *np = nt->parent_node;
uint32_t pipe_id = tm_node_pipe_id(dev, np);
struct tm_node *ns = np->parent_node;
uint32_t subport_id = tm_node_subport_id(dev, ns);
- uint32_t pipe_queue_id =
- tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
+ uint32_t pipe_be_queue_id =
+ queue_id - RTE_SCHED_TRAFFIC_CLASS_BE;
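+ /* Only best-effort queues have WRR weights; pipe_be_queue_id is the
+ * 0-based index of this queue among the BE queues of its pipe.
+ */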
struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
struct rte_sched_pipe_params profile1;
/* Derive new pipe profile. */
memcpy(&profile1, profile0, sizeof(profile1));
- profile1.wrr_weights[pipe_queue_id] = (uint8_t)weight;
+ profile1.wrr_weights[pipe_be_queue_id] = (uint8_t)weight;
/* Since implementation does not allow adding more pipe profiles after
* port configuration, the pipe configuration can be successfully
return -1;
/* Update the pipe profile used by the current pipe. */
- if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
(int32_t)pipe_profile_id))
return -1;
struct pmd_internals *p = dev->data->dev_private;
uint32_t subport_id = tm_node_subport_id(dev, ns);
- struct rte_sched_subport_params subport_params;
+ struct rte_sched_subport_profile_params *profile0 =
+ subport_profile_get(dev, ns);
+ struct rte_sched_subport_profile_params profile1;
+ uint32_t subport_profile_id;
- /* Derive new subport configuration. */
- memcpy(&subport_params,
- &p->soft.tm.params.subport_params[subport_id],
- sizeof(subport_params));
- subport_params.tb_rate = sp->params.peak.rate;
- subport_params.tb_size = sp->params.peak.size;
+ if (profile0 == NULL)
+ return -1;
+
+ /* Derive new subport profile. */
+ memcpy(&profile1, profile0, sizeof(profile1));
+ profile1.tb_rate = sp->params.peak.rate;
+ profile1.tb_size = sp->params.peak.size;
+
+ /* Since implementation does not allow adding more subport profiles
+ * after port configuration, the subport configuration can be
+ * successfully updated only if the new profile is also part of the
+ * existing set of subport profiles.
+ */
+ if (subport_profile_exists(dev, &profile1, &subport_profile_id) == 0)
+ return -1;
/* Update the subport configuration. */
- if (rte_sched_subport_config(p->soft.tm.sched, subport_id,
- &subport_params))
+ if (rte_sched_subport_config(SCHED(p), subport_id,
+ NULL, subport_profile_id))
return -1;
/* Commit changes. */
ns->params.shaper_profile_id = sp->shaper_profile_id;
sp->n_users++;
- memcpy(&p->soft.tm.params.subport_params[subport_id],
- &subport_params,
- sizeof(subport_params));
+ subport_profile_mark(dev, subport_id, subport_profile_id);
+
+ memcpy(&p->soft.tm.params.subport_profile[subport_profile_id],
+ &profile1,
+ sizeof(profile1));
return 0;
}
return -1;
/* Update the pipe profile used by the current pipe. */
- if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
(int32_t)pipe_profile_id))
return -1;
return -1;
/* Update the pipe profile used by the current pipe. */
- if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
(int32_t)pipe_profile_id))
return -1;
uint32_t port_pipe_id =
port_subport_id * n_pipes_per_subport + subport_pipe_id;
- uint32_t port_tc_id =
- port_pipe_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + pipe_tc_id;
+
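+ /* Queues are numbered contiguously per pipe (RTE_SCHED_QUEUES_PER_PIPE
+ * each): strict-priority TCs contribute one queue each at offset
+ * pipe_tc_id, with the best-effort TC queues last, selected by
+ * tc_queue_id.
+ */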
uint32_t port_queue_id =
- port_tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_queue_id;
+ port_pipe_id * RTE_SCHED_QUEUES_PER_PIPE + pipe_tc_id + tc_queue_id;
return port_queue_id;
}
uint32_t tc_ov, id;
/* Stats read */
- int status = rte_sched_subport_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_subport_read_stats(SCHED(p),
subport_id,
&s,
&tc_ov);
s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
nr->stats.n_bytes +=
s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
- nr->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
+ nr->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
s.n_pkts_tc_dropped[id];
- nr->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ nr->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
s.n_bytes_tc_dropped[id];
}
}
uint32_t tc_ov, tc_id;
/* Stats read */
- int status = rte_sched_subport_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_subport_read_stats(SCHED(p),
subport_id,
&s,
&tc_ov);
s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
ns->stats.n_bytes +=
s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
- ns->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
+ ns->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
s.n_pkts_tc_dropped[tc_id];
- ns->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ ns->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
s.n_bytes_tc_dropped[tc_id];
}
struct tm_node *ns = np->parent_node;
uint32_t subport_id = tm_node_subport_id(dev, ns);
-
+ uint32_t tc_id, queue_id;
uint32_t i;
/* Stats read */
struct rte_sched_queue_stats s;
uint16_t qlen;
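+ /* Queues below RTE_SCHED_TRAFFIC_CLASS_BE map 1:1 to the strict-priority
+ * TCs (one queue per TC, hence within-TC index 0); all remaining queues
+ * belong to the best-effort TC.
+ */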
+ if (i < RTE_SCHED_TRAFFIC_CLASS_BE) {
+ tc_id = i;
+ queue_id = 0;
+ } else {
+ tc_id = RTE_SCHED_TRAFFIC_CLASS_BE;
+ queue_id = i - tc_id;
+ }
+
uint32_t qid = tm_port_queue_id(dev,
subport_id,
pipe_id,
- i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
- i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
+ tc_id,
+ queue_id);
- int status = rte_sched_queue_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_queue_read_stats(SCHED(p),
qid,
&s,
&qlen);
/* Stats accumulate */
np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
- np->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
- np->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ np->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
+ np->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
s.n_bytes_dropped;
np->stats.leaf.n_pkts_queued = qlen;
}
struct tm_node *ns = np->parent_node;
uint32_t subport_id = tm_node_subport_id(dev, ns);
-
- uint32_t i;
+ struct rte_sched_queue_stats s;
+ uint32_t qid, i;
+ uint16_t qlen;
+ int status;
/* Stats read */
- for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
- struct rte_sched_queue_stats s;
- uint16_t qlen;
-
- uint32_t qid = tm_port_queue_id(dev,
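+ /* A strict-priority TC owns a single queue (within-TC index 0), while
+ * the best-effort TC aggregates RTE_SCHED_BE_QUEUES_PER_PIPE queues,
+ * handled in the else branch below.
+ */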
+ if (tc_id < RTE_SCHED_TRAFFIC_CLASS_BE) {
+ qid = tm_port_queue_id(dev,
subport_id,
pipe_id,
tc_id,
- i);
+ 0);
- int status = rte_sched_queue_read_stats(
- p->soft.tm.sched,
+ status = rte_sched_queue_read_stats(SCHED(p),
qid,
&s,
&qlen);
/* Stats accumulate */
nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
- nt->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
- nt->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
+ nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
s.n_bytes_dropped;
nt->stats.leaf.n_pkts_queued = qlen;
+ } else {
+ for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
+ qid = tm_port_queue_id(dev,
+ subport_id,
+ pipe_id,
+ tc_id,
+ i);
+
+ status = rte_sched_queue_read_stats(SCHED(p),
+ qid,
+ &s,
+ &qlen);
+ if (status)
+ return status;
+
+ /* Stats accumulate */
+ nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
+ nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
+ nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
+ s.n_pkts_dropped;
+ nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
+ s.n_bytes_dropped;
+ nt->stats.leaf.n_pkts_queued = qlen;
+ }
}
/* Stats copy */
tc_id,
queue_id);
- int status = rte_sched_queue_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_queue_read_stats(SCHED(p),
qid,
&s,
&qlen);
/* Stats accumulate */
nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
- nq->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
- nq->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ nq->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
+ nq->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
s.n_bytes_dropped;
nq->stats.leaf.n_pkts_queued = qlen;