/* Subport */
n_subports = t->port_params.n_subports_per_port;
for (subport_id = 0; subport_id < n_subports; subport_id++) {
- uint32_t n_pipes_per_subport = t->port_params.n_pipes_per_subport;
+ uint32_t n_pipes_per_subport =
+ t->subport_params[subport_id].n_pipes_per_subport_enabled;
uint32_t pipe_id;
int status;
status = rte_sched_subport_config(sched,
subport_id,
- &t->subport_params[subport_id]);
+ &t->subport_params[subport_id], 0);
if (status) {
rte_sched_port_free(sched);
return NULL;
}
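For reference, a minimal sketch of the updated call pattern, assuming the new fourth argument of rte_sched_subport_config() is the subport profile id (0 selects the first profile); the helper name and surrounding setup are illustrative only:

#include <rte_sched.h>

/* Illustrative helper: configure every subport of an already created
 * scheduler port with its per-subport parameters, passing 0 as the
 * (assumed) subport profile id.
 */
static struct rte_sched_port *
configure_subports(struct rte_sched_port *sched,
	struct rte_sched_subport_params *subport_params,
	uint32_t n_subports)
{
	uint32_t subport_id;

	for (subport_id = 0; subport_id < n_subports; subport_id++) {
		int status = rte_sched_subport_config(sched,
			subport_id,
			&subport_params[subport_id],
			0);

		if (status) {
			rte_sched_port_free(sched);
			return NULL;
		}
	}

	return sched;
}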
struct pmd_internals *p = dev->data->dev_private;
uint32_t n_queues_max = p->params.tm.n_queues;
- uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
+ uint32_t n_tc_max =
+ (n_queues_max * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ / RTE_SCHED_QUEUES_PER_PIPE;
uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
uint32_t n_subports_max = n_pipes_max;
uint32_t n_root_max = 1;
.shaper_private_dual_rate_n_max = 0,
.shaper_private_rate_min = 1,
.shaper_private_rate_max = UINT32_MAX,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 1,
.shaper_shared_n_max = UINT32_MAX,
.shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
.shaper_shared_dual_rate_n_max = 0,
.shaper_shared_rate_min = 1,
.shaper_shared_rate_max = UINT32_MAX,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 1,
.shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
.shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
.sched_wfq_n_children_per_group_max = UINT32_MAX,
.sched_wfq_n_groups_max = 1,
.sched_wfq_weight_max = UINT32_MAX,
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 1,
.cman_wred_packet_mode_supported = WRED_SUPPORTED,
.cman_wred_byte_mode_supported = 0,
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
.shaper_private_rate_max = UINT32_MAX,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 1,
.shaper_shared_n_max = 0,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 0,
.sched_n_children_max = UINT32_MAX,
.sched_sp_n_priorities_max = 1,
.sched_wfq_n_children_per_group_max = UINT32_MAX,
.sched_wfq_n_groups_max = 1,
.sched_wfq_weight_max = 1,
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 1,
.stats_mask = STATS_MASK_DEFAULT,
} },
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
.shaper_private_rate_max = UINT32_MAX,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 1,
.shaper_shared_n_max = 0,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 0,
.sched_n_children_max = UINT32_MAX,
.sched_sp_n_priorities_max = 1,
.sched_wfq_n_groups_max = 1,
#ifdef RTE_SCHED_SUBPORT_TC_OV
.sched_wfq_weight_max = UINT32_MAX,
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 1,
#else
.sched_wfq_weight_max = 1,
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 1,
#endif
+
.stats_mask = STATS_MASK_DEFAULT,
} },
},
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
.shaper_private_rate_max = UINT32_MAX,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 1,
.shaper_shared_n_max = 0,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 0,
.sched_n_children_max =
RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
.sched_wfq_n_children_per_group_max = 1,
.sched_wfq_n_groups_max = 0,
.sched_wfq_weight_max = 1,
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 0,
.stats_mask = STATS_MASK_DEFAULT,
} },
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
.shaper_private_rate_max = UINT32_MAX,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 1,
.shaper_shared_n_max = 1,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 1,
.sched_n_children_max =
- RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ RTE_SCHED_BE_QUEUES_PER_PIPE,
.sched_sp_n_priorities_max = 1,
.sched_wfq_n_children_per_group_max =
- RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ RTE_SCHED_BE_QUEUES_PER_PIPE,
.sched_wfq_n_groups_max = 1,
.sched_wfq_weight_max = UINT32_MAX,
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 1,
.stats_mask = STATS_MASK_DEFAULT,
} },
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 0,
.shaper_private_rate_max = 0,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 0,
.shaper_shared_n_max = 0,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 0,
.cman_head_drop_supported = 0,
.cman_wred_packet_mode_supported = WRED_SUPPORTED,
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
.shaper_private_rate_max = UINT32_MAX,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 1,
.shaper_shared_n_max = 0,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 0,
{.nonleaf = {
.sched_n_children_max = UINT32_MAX,
.sched_wfq_n_children_per_group_max = UINT32_MAX,
.sched_wfq_n_groups_max = 1,
.sched_wfq_weight_max = 1,
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 1,
} },
.stats_mask = STATS_MASK_DEFAULT,
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
.shaper_private_rate_max = UINT32_MAX,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 1,
.shaper_shared_n_max = 0,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 0,
{.nonleaf = {
.sched_n_children_max = UINT32_MAX,
.sched_wfq_n_children_per_group_max = UINT32_MAX,
.sched_wfq_n_groups_max = 1,
.sched_wfq_weight_max = UINT32_MAX,
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 1,
} },
.stats_mask = STATS_MASK_DEFAULT,
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
.shaper_private_rate_max = UINT32_MAX,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 1,
.shaper_shared_n_max = 0,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 0,
{.nonleaf = {
.sched_n_children_max =
RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
.sched_wfq_n_children_per_group_max = 1,
.sched_wfq_n_groups_max = 0,
.sched_wfq_weight_max = 1,
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 0,
} },
.stats_mask = STATS_MASK_DEFAULT,
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 1,
.shaper_private_rate_max = UINT32_MAX,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 1,
.shaper_shared_n_max = 1,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 1,
{.nonleaf = {
.sched_n_children_max =
- RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ RTE_SCHED_BE_QUEUES_PER_PIPE,
.sched_sp_n_priorities_max = 1,
.sched_wfq_n_children_per_group_max =
- RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ RTE_SCHED_BE_QUEUES_PER_PIPE,
.sched_wfq_n_groups_max = 1,
.sched_wfq_weight_max = UINT32_MAX,
+ .sched_wfq_packet_mode_supported = 0,
+ .sched_wfq_byte_mode_supported = 1,
} },
.stats_mask = STATS_MASK_DEFAULT,
.shaper_private_dual_rate_supported = 0,
.shaper_private_rate_min = 0,
.shaper_private_rate_max = 0,
+ .shaper_private_packet_mode_supported = 0,
+ .shaper_private_byte_mode_supported = 0,
.shaper_shared_n_max = 0,
+ .shaper_shared_packet_mode_supported = 0,
+ .shaper_shared_byte_mode_supported = 0,
{.leaf = {
NULL,
rte_strerror(EINVAL));
+ /* Packet mode is not supported. */
+ if (profile->packet_mode != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE,
+ NULL,
+ rte_strerror(EINVAL));
return 0;
}
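As a hedged illustration of the byte-mode-only policy enforced above, the sketch below builds a shaper profile consistent with this check; a profile with packet_mode set would be rejected with RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE. The helper name, rates and bucket size are placeholder values.

#include <string.h>
#include <rte_tm.h>

static int
add_byte_mode_shaper_profile(uint16_t port_id, uint32_t profile_id,
	uint64_t rate_bytes_per_sec, struct rte_tm_error *error)
{
	struct rte_tm_shaper_params params;

	memset(&params, 0, sizeof(params));
	params.peak.rate = rate_bytes_per_sec;	/* bytes per second (byte mode) */
	params.peak.size = 1000000;		/* token bucket size, in bytes */
	params.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	params.packet_mode = 0;			/* byte mode; packet mode not supported */

	return rte_tm_shaper_profile_add(port_id, profile_id, &params, error);
}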
/* Update the subport configuration. */
if (rte_sched_subport_config(SCHED(p),
- subport_id, &subport_params))
+ subport_id, &subport_params, 0))
return -1;
/* Commit changes. */
/* Traffic Class (TC) */
pp->tc_period = PIPE_TC_PERIOD;
-#ifdef RTE_SCHED_SUBPORT_TC_OV
pp->tc_ov_weight = np->weight;
-#endif
TAILQ_FOREACH(nt, nl, node) {
uint32_t queue_id = 0;
/* Queue */
TAILQ_FOREACH(nq, nl, node) {
- uint32_t pipe_queue_id;
if (nq->level != TM_NODE_LEVEL_QUEUE ||
nq->parent_node_id != nt->node_id)
continue;
- pipe_queue_id = nt->priority *
- RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
- pp->wrr_weights[pipe_queue_id] = nq->weight;
+ if (nt->priority == RTE_SCHED_TRAFFIC_CLASS_BE)
+ pp->wrr_weights[queue_id] = nq->weight;
queue_id++;
}
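A brief sketch of the assumption encoded by this loop: with the flexible hierarchy, WRR weights exist only for the best-effort traffic class, so the pipe profile carries RTE_SCHED_BE_QUEUES_PER_PIPE weight entries; the helper below is illustrative only.

#include <rte_sched.h>

static void
pipe_profile_set_be_weights(struct rte_sched_pipe_params *pp,
	const uint8_t weights[RTE_SCHED_BE_QUEUES_PER_PIPE])
{
	uint32_t i;

	/* Only the best-effort queues are WRR-scheduled; strict-priority
	 * traffic classes own a single queue each and take no weight.
	 */
	for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++)
		pp->wrr_weights[i] = weights[i];
}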
struct pmd_internals *p = dev->data->dev_private;
struct tm_params *t = &p->soft.tm.params;
- if (t->n_pipe_profiles < RTE_SCHED_PIPE_PROFILES_PER_PORT) {
+ if (t->n_pipe_profiles < TM_MAX_PIPE_PROFILE) {
*pipe_profile_id = t->n_pipe_profiles;
return 1;
}
#ifdef RTE_SCHED_RED
static void
-wred_profiles_set(struct rte_eth_dev *dev)
+wred_profiles_set(struct rte_eth_dev *dev, uint32_t subport_id)
{
struct pmd_internals *p = dev->data->dev_private;
- struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
+ struct rte_sched_subport_params *pp =
+ &p->soft.tm.params.subport_params[subport_id];
+
uint32_t tc_id;
enum rte_color color;
#else
-#define wred_profiles_set(dev)
+#define wred_profiles_set(dev, subport_id)
#endif
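Since the WRED thresholds now travel with the subport rather than the port, here is a minimal sketch of what wred_profiles_set() is assumed to fill in; the threshold values are placeholders, and the red_params layout is the one suggested by the tc_id/color loop variables above.

#include <rte_sched.h>
#include <rte_red.h>
#include <rte_meter.h>

#ifdef RTE_SCHED_RED
static void
subport_wred_defaults(struct rte_sched_subport_params *sp)
{
	uint32_t tc_id;
	enum rte_color color;

	for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
		for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
			struct rte_red_params *red = &sp->red_params[tc_id][color];

			red->min_th = 32;	/* placeholder thresholds */
			red->max_th = 64;
			red->maxp_inv = 10;
			red->wq_log2 = 9;
		}
}
#endif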
rte_strerror(EINVAL));
}
- /* Each pipe has exactly 4 TCs, with exactly one TC for each priority */
+ /* Each pipe has exactly 13 TCs, with exactly one TC for each priority */
TAILQ_FOREACH(np, nl, node) {
uint32_t mask = 0, mask_expected =
RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
rte_strerror(EINVAL));
}
- /* Each TC has exactly 4 packet queues. */
+ /** Each strict-priority TC has exactly 1 packet queue, while the
+ * lowest priority TC (best-effort) has 4 queues.
+ */
TAILQ_FOREACH(nt, nl, node) {
if (nt->level != TM_NODE_LEVEL_TC)
continue;
- if (nt->n_children != RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
+ if (nt->n_children != 1 && nt->n_children != RTE_SCHED_BE_QUEUES_PER_PIPE)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED,
.frame_overhead =
root->shaper_profile->params.pkt_length_adjust,
.n_subports_per_port = root->n_children,
- .n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
- h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
- .qsize = {p->params.tm.qsize[0],
- p->params.tm.qsize[1],
- p->params.tm.qsize[2],
- p->params.tm.qsize[3],
- },
- .pipe_profiles = t->pipe_profiles,
- .n_pipe_profiles = t->n_pipe_profiles,
+ .n_pipes_per_subport = TM_MAX_PIPES_PER_SUBPORT,
};
- wred_profiles_set(dev);
-
subport_id = 0;
TAILQ_FOREACH(n, nl, node) {
uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
tc_rate[1],
tc_rate[2],
tc_rate[3],
- },
- .tc_period = SUBPORT_TC_PERIOD,
+ tc_rate[4],
+ tc_rate[5],
+ tc_rate[6],
+ tc_rate[7],
+ tc_rate[8],
+ tc_rate[9],
+ tc_rate[10],
+ tc_rate[11],
+ tc_rate[12],
+ },
+ .tc_period = SUBPORT_TC_PERIOD,
+ .n_pipes_per_subport_enabled =
+ h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
+ h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
+ .qsize = {p->params.tm.qsize[0],
+ p->params.tm.qsize[1],
+ p->params.tm.qsize[2],
+ p->params.tm.qsize[3],
+ p->params.tm.qsize[4],
+ p->params.tm.qsize[5],
+ p->params.tm.qsize[6],
+ p->params.tm.qsize[7],
+ p->params.tm.qsize[8],
+ p->params.tm.qsize[9],
+ p->params.tm.qsize[10],
+ p->params.tm.qsize[11],
+ p->params.tm.qsize[12],
+ },
+ .pipe_profiles = t->pipe_profiles,
+ .n_pipe_profiles = t->n_pipe_profiles,
+ .n_max_pipe_profiles = TM_MAX_PIPE_PROFILE,
};
-
+ wred_profiles_set(dev, subport_id);
subport_id++;
}
}
uint32_t queue_id = tm_node_queue_id(dev, nq);
struct tm_node *nt = nq->parent_node;
- uint32_t tc_id = tm_node_tc_id(dev, nt);
struct tm_node *np = nt->parent_node;
uint32_t pipe_id = tm_node_pipe_id(dev, np);
struct tm_node *ns = np->parent_node;
uint32_t subport_id = tm_node_subport_id(dev, ns);
- uint32_t pipe_queue_id =
- tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
+ uint32_t pipe_be_queue_id =
+ queue_id - RTE_SCHED_TRAFFIC_CLASS_BE;
struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
struct rte_sched_pipe_params profile1;
/* Derive new pipe profile. */
memcpy(&profile1, profile0, sizeof(profile1));
- profile1.wrr_weights[pipe_queue_id] = (uint8_t)weight;
+ profile1.wrr_weights[pipe_be_queue_id] = (uint8_t)weight;
/* Since implementation does not allow adding more pipe profiles after
* port configuration, the pipe configuration can be successfully
/* Update the subport configuration. */
if (rte_sched_subport_config(SCHED(p), subport_id,
- &subport_params))
+ &subport_params, 0))
return -1;
/* Commit changes. */
uint32_t port_pipe_id =
port_subport_id * n_pipes_per_subport + subport_pipe_id;
- uint32_t port_tc_id =
- port_pipe_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + pipe_tc_id;
+
uint32_t port_queue_id =
- port_tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_queue_id;
+ port_pipe_id * RTE_SCHED_QUEUES_PER_PIPE + pipe_tc_id + tc_queue_id;
return port_queue_id;
}
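A worked example of the flattened queue index computed above, assuming the default constants (RTE_SCHED_QUEUES_PER_PIPE == 16, RTE_SCHED_TRAFFIC_CLASS_BE == 12); tm_port_queue_id() is the function defined just above, so this sketch would live in the same file.

#include <assert.h>
#include <rte_ethdev.h>
#include <rte_sched.h>

static void
tm_port_queue_id_example(struct rte_eth_dev *dev)
{
	/* Strict-priority TC 3 of subport 0, pipe 0 owns one queue: index 3. */
	assert(tm_port_queue_id(dev, 0, 0, 3, 0) == 3);

	/* Best-effort queue 1 of subport 0, pipe 2: 2 * 16 + 12 + 1 = 45. */
	assert(tm_port_queue_id(dev, 0, 2, RTE_SCHED_TRAFFIC_CLASS_BE, 1) == 45);
}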
struct tm_node *ns = np->parent_node;
uint32_t subport_id = tm_node_subport_id(dev, ns);
-
+ uint32_t tc_id, queue_id;
uint32_t i;
/* Stats read */
struct rte_sched_queue_stats s;
uint16_t qlen;
+ if (i < RTE_SCHED_TRAFFIC_CLASS_BE) {
+ tc_id = i;
+ queue_id = 0; /* each strict-priority TC owns a single queue */
+ } else {
+ tc_id = RTE_SCHED_TRAFFIC_CLASS_BE;
+ queue_id = i - tc_id;
+ }
+
uint32_t qid = tm_port_queue_id(dev,
subport_id,
pipe_id,
- i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
- i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
+ tc_id,
+ queue_id);
int status = rte_sched_queue_read_stats(SCHED(p),
qid,
struct tm_node *ns = np->parent_node;
uint32_t subport_id = tm_node_subport_id(dev, ns);
-
- uint32_t i;
+ struct rte_sched_queue_stats s;
+ uint32_t qid, i;
+ uint16_t qlen;
+ int status;
/* Stats read */
- for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
- struct rte_sched_queue_stats s;
- uint16_t qlen;
-
- uint32_t qid = tm_port_queue_id(dev,
+ if (tc_id < RTE_SCHED_TRAFFIC_CLASS_BE) {
+ qid = tm_port_queue_id(dev,
subport_id,
pipe_id,
tc_id,
- i);
+ 0);
- int status = rte_sched_queue_read_stats(SCHED(p),
+ status = rte_sched_queue_read_stats(SCHED(p),
qid,
&s,
&qlen);
nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
s.n_bytes_dropped;
nt->stats.leaf.n_pkts_queued = qlen;
+ } else {
+ for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
+ qid = tm_port_queue_id(dev,
+ subport_id,
+ pipe_id,
+ tc_id,
+ i);
+
+ status = rte_sched_queue_read_stats(SCHED(p),
+ qid,
+ &s,
+ &qlen);
+ if (status)
+ return status;
+
+ /* Stats accumulate */
+ nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
+ nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
+ nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
+ s.n_pkts_dropped;
+ nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
+ s.n_bytes_dropped;
+ nt->stats.leaf.n_pkts_queued = qlen;
+ }
}
/* Stats copy */