X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsoftnic%2Frte_eth_softnic_tm.c;h=90baba15ce582ebf03611ed50c6eb900c479bd36;hb=b37ed6def36798342172f298516c5fc6d0d8e070;hp=61c3adc82710baa9b8f7feac71bbac3020811e31;hpb=5eb676d74fc8a15ed677fba7b7b3f5efb2af5fae;p=dpdk.git

diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c
index 61c3adc827..90baba15ce 100644
--- a/drivers/net/softnic/rte_eth_softnic_tm.c
+++ b/drivers/net/softnic/rte_eth_softnic_tm.c
@@ -85,13 +85,15 @@ softnic_tmgr_port_create(struct pmd_internals *p,
 	/* Subport */
 	n_subports = t->port_params.n_subports_per_port;
 	for (subport_id = 0; subport_id < n_subports; subport_id++) {
-		uint32_t n_pipes_per_subport = t->port_params.n_pipes_per_subport;
+		uint32_t n_pipes_per_subport =
+			t->subport_params[subport_id].n_pipes_per_subport_enabled;
 		uint32_t pipe_id;
 		int status;
 
 		status = rte_sched_subport_config(sched,
 			subport_id,
-			&t->subport_params[subport_id]);
+			&t->subport_params[subport_id],
+			t->subport_to_profile[subport_id]);
 		if (status) {
 			rte_sched_port_free(sched);
 			return NULL;
@@ -446,6 +448,8 @@ static const struct rte_tm_capabilities tm_cap = {
 	.shaper_private_dual_rate_n_max = 0,
 	.shaper_private_rate_min = 1,
 	.shaper_private_rate_max = UINT32_MAX,
+	.shaper_private_packet_mode_supported = 0,
+	.shaper_private_byte_mode_supported = 1,
 
 	.shaper_shared_n_max = UINT32_MAX,
 	.shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
@@ -453,6 +457,8 @@ static const struct rte_tm_capabilities tm_cap = {
 	.shaper_shared_dual_rate_n_max = 0,
 	.shaper_shared_rate_min = 1,
 	.shaper_shared_rate_max = UINT32_MAX,
+	.shaper_shared_packet_mode_supported = 0,
+	.shaper_shared_byte_mode_supported = 1,
 
 	.shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
 	.shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
@@ -462,6 +468,8 @@ static const struct rte_tm_capabilities tm_cap = {
 	.sched_wfq_n_children_per_group_max = UINT32_MAX,
 	.sched_wfq_n_groups_max = 1,
 	.sched_wfq_weight_max = UINT32_MAX,
+	.sched_wfq_packet_mode_supported = 0,
+	.sched_wfq_byte_mode_supported = 1,
 
 	.cman_wred_packet_mode_supported = WRED_SUPPORTED,
 	.cman_wred_byte_mode_supported = 0,
@@ -547,13 +555,19 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
 			.shaper_private_dual_rate_supported = 0,
 			.shaper_private_rate_min = 1,
 			.shaper_private_rate_max = UINT32_MAX,
+			.shaper_private_packet_mode_supported = 0,
+			.shaper_private_byte_mode_supported = 1,
 			.shaper_shared_n_max = 0,
+			.shaper_shared_packet_mode_supported = 0,
+			.shaper_shared_byte_mode_supported = 0,
 
 			.sched_n_children_max = UINT32_MAX,
 			.sched_sp_n_priorities_max = 1,
 			.sched_wfq_n_children_per_group_max = UINT32_MAX,
 			.sched_wfq_n_groups_max = 1,
 			.sched_wfq_weight_max = 1,
+			.sched_wfq_packet_mode_supported = 0,
+			.sched_wfq_byte_mode_supported = 1,
 
 			.stats_mask = STATS_MASK_DEFAULT,
 		} },
@@ -571,7 +585,11 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
 			.shaper_private_dual_rate_supported = 0,
 			.shaper_private_rate_min = 1,
 			.shaper_private_rate_max = UINT32_MAX,
+			.shaper_private_packet_mode_supported = 0,
+			.shaper_private_byte_mode_supported = 1,
 			.shaper_shared_n_max = 0,
+			.shaper_shared_packet_mode_supported = 0,
+			.shaper_shared_byte_mode_supported = 0,
 
 			.sched_n_children_max = UINT32_MAX,
 			.sched_sp_n_priorities_max = 1,
@@ -579,9 +597,14 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
 			.sched_wfq_n_groups_max = 1,
 #ifdef RTE_SCHED_SUBPORT_TC_OV
 			.sched_wfq_weight_max = UINT32_MAX,
+			.sched_wfq_packet_mode_supported = 0,
+			.sched_wfq_byte_mode_supported = 1,
 #else
 			.sched_wfq_weight_max = 1,
+			.sched_wfq_packet_mode_supported = 0,
+			.sched_wfq_byte_mode_supported = 1,
 #endif
+
 			.stats_mask = STATS_MASK_DEFAULT,
 		} },
 	},
@@ -598,7 +621,11 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
 			.shaper_private_dual_rate_supported = 0,
 			.shaper_private_rate_min = 1,
 			.shaper_private_rate_max = UINT32_MAX,
+			.shaper_private_packet_mode_supported = 0,
+			.shaper_private_byte_mode_supported = 1,
 			.shaper_shared_n_max = 0,
+			.shaper_shared_packet_mode_supported = 0,
+			.shaper_shared_byte_mode_supported = 0,
 
 			.sched_n_children_max =
 				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
@@ -607,6 +634,8 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
 			.sched_sp_n_priorities_max =
 				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
 			.sched_wfq_n_children_per_group_max = 1,
 			.sched_wfq_n_groups_max = 0,
 			.sched_wfq_weight_max = 1,
+			.sched_wfq_packet_mode_supported = 0,
+			.sched_wfq_byte_mode_supported = 0,
 
 			.stats_mask = STATS_MASK_DEFAULT,
 		} },
@@ -624,7 +653,11 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
 			.shaper_private_dual_rate_supported = 0,
 			.shaper_private_rate_min = 1,
 			.shaper_private_rate_max = UINT32_MAX,
+			.shaper_private_packet_mode_supported = 0,
+			.shaper_private_byte_mode_supported = 1,
 			.shaper_shared_n_max = 1,
+			.shaper_shared_packet_mode_supported = 0,
+			.shaper_shared_byte_mode_supported = 1,
 
 			.sched_n_children_max =
 				RTE_SCHED_BE_QUEUES_PER_PIPE,
@@ -633,6 +666,8 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
 			.sched_wfq_n_children_per_group_max =
 				RTE_SCHED_BE_QUEUES_PER_PIPE,
 			.sched_wfq_n_groups_max = 1,
 			.sched_wfq_weight_max = UINT32_MAX,
+			.sched_wfq_packet_mode_supported = 0,
+			.sched_wfq_byte_mode_supported = 1,
 
 			.stats_mask = STATS_MASK_DEFAULT,
 		} },
@@ -650,7 +685,11 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
 			.shaper_private_dual_rate_supported = 0,
 			.shaper_private_rate_min = 0,
 			.shaper_private_rate_max = 0,
+			.shaper_private_packet_mode_supported = 0,
+			.shaper_private_byte_mode_supported = 0,
 			.shaper_shared_n_max = 0,
+			.shaper_shared_packet_mode_supported = 0,
+			.shaper_shared_byte_mode_supported = 0,
 
 			.cman_head_drop_supported = 0,
 			.cman_wred_packet_mode_supported = WRED_SUPPORTED,
@@ -735,7 +774,11 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 		.shaper_private_dual_rate_supported = 0,
 		.shaper_private_rate_min = 1,
 		.shaper_private_rate_max = UINT32_MAX,
+		.shaper_private_packet_mode_supported = 0,
+		.shaper_private_byte_mode_supported = 1,
 		.shaper_shared_n_max = 0,
+		.shaper_shared_packet_mode_supported = 0,
+		.shaper_shared_byte_mode_supported = 0,
 
 		{.nonleaf = {
 			.sched_n_children_max = UINT32_MAX,
@@ -743,6 +786,8 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 			.sched_wfq_n_children_per_group_max = UINT32_MAX,
 			.sched_wfq_n_groups_max = 1,
 			.sched_wfq_weight_max = 1,
+			.sched_wfq_packet_mode_supported = 0,
+			.sched_wfq_byte_mode_supported = 1,
 		} },
 
 		.stats_mask = STATS_MASK_DEFAULT,
@@ -753,7 +798,11 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 		.shaper_private_dual_rate_supported = 0,
 		.shaper_private_rate_min = 1,
 		.shaper_private_rate_max = UINT32_MAX,
+		.shaper_private_packet_mode_supported = 0,
+		.shaper_private_byte_mode_supported = 1,
 		.shaper_shared_n_max = 0,
+		.shaper_shared_packet_mode_supported = 0,
+		.shaper_shared_byte_mode_supported = 0,
 
 		{.nonleaf = {
 			.sched_n_children_max = UINT32_MAX,
@@ -761,6 +810,8 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 			.sched_wfq_n_children_per_group_max = UINT32_MAX,
 			.sched_wfq_n_groups_max = 1,
 			.sched_wfq_weight_max = UINT32_MAX,
+			.sched_wfq_packet_mode_supported = 0,
+			.sched_wfq_byte_mode_supported = 1,
 		} },
 
 		.stats_mask = STATS_MASK_DEFAULT,
@@ -771,7 +822,11 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 		.shaper_private_dual_rate_supported = 0,
 		.shaper_private_rate_min = 1,
 		.shaper_private_rate_max = UINT32_MAX,
+		.shaper_private_packet_mode_supported = 0,
+		.shaper_private_byte_mode_supported = 1,
 		.shaper_shared_n_max = 0,
+		.shaper_shared_packet_mode_supported = 0,
+		.shaper_shared_byte_mode_supported = 0,
 
 		{.nonleaf = {
 			.sched_n_children_max =
@@ -781,6 +836,8 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 			.sched_wfq_n_children_per_group_max = 1,
 			.sched_wfq_n_groups_max = 0,
 			.sched_wfq_weight_max = 1,
+			.sched_wfq_packet_mode_supported = 0,
+			.sched_wfq_byte_mode_supported = 0,
 		} },
 
 		.stats_mask = STATS_MASK_DEFAULT,
@@ -791,7 +848,11 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 		.shaper_private_dual_rate_supported = 0,
 		.shaper_private_rate_min = 1,
 		.shaper_private_rate_max = UINT32_MAX,
+		.shaper_private_packet_mode_supported = 0,
+		.shaper_private_byte_mode_supported = 1,
 		.shaper_shared_n_max = 1,
+		.shaper_shared_packet_mode_supported = 0,
+		.shaper_shared_byte_mode_supported = 1,
 
 		{.nonleaf = {
 			.sched_n_children_max =
@@ -801,6 +862,8 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 				RTE_SCHED_BE_QUEUES_PER_PIPE,
 			.sched_wfq_n_groups_max = 1,
 			.sched_wfq_weight_max = UINT32_MAX,
+			.sched_wfq_packet_mode_supported = 0,
+			.sched_wfq_byte_mode_supported = 1,
 		} },
 
 		.stats_mask = STATS_MASK_DEFAULT,
@@ -811,7 +874,11 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
 		.shaper_private_dual_rate_supported = 0,
 		.shaper_private_rate_min = 0,
 		.shaper_private_rate_max = 0,
+		.shaper_private_packet_mode_supported = 0,
+		.shaper_private_byte_mode_supported = 0,
 		.shaper_shared_n_max = 0,
+		.shaper_shared_packet_mode_supported = 0,
+		.shaper_shared_byte_mode_supported = 0,
 
 
 		{.leaf = {
@@ -946,6 +1013,13 @@ shaper_profile_check(struct rte_eth_dev *dev,
 			NULL,
 			rte_strerror(EINVAL));
 
+	/* Packet mode is not supported. */
+	if (profile->packet_mode != 0)
+		return -rte_tm_error_set(error,
+			EINVAL,
+			RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE,
+			NULL,
+			rte_strerror(EINVAL));
 	return 0;
 }
 
@@ -1041,34 +1115,56 @@ tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
 	return NULL;
 }
 
+static int
+subport_profile_exists(struct rte_eth_dev *dev,
+	struct rte_sched_subport_profile_params *sp,
+	uint32_t *subport_profile_id)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_params *t = &p->soft.tm.params;
+	uint32_t i;
+
+	for (i = 0; i < t->n_subport_profiles; i++)
+		if (memcmp(&t->subport_profile[i], sp, sizeof(*sp)) == 0) {
+			if (subport_profile_id)
+				*subport_profile_id = i;
+			return 1;
+		}
+
+	return 0;
+}
+
 static int
 update_subport_tc_rate(struct rte_eth_dev *dev,
 	struct tm_node *nt,
 	struct tm_shared_shaper *ss,
 	struct tm_shaper_profile *sp_new)
 {
+	struct rte_sched_subport_profile_params subport_profile;
 	struct pmd_internals *p = dev->data->dev_private;
 	uint32_t tc_id = tm_node_tc_id(dev, nt);
-
 	struct tm_node *np = nt->parent_node;
-
 	struct tm_node *ns = np->parent_node;
 	uint32_t subport_id = tm_node_subport_id(dev, ns);
-
-	struct rte_sched_subport_params subport_params;
-
+	struct tm_params *t = &p->soft.tm.params;
+	uint32_t subport_profile_id;
 	struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
 		ss->shaper_profile_id);
 
+	if (subport_id >= TM_MAX_SUBPORT_PROFILE)
+		return -1;
+
+	subport_profile_id = t->subport_to_profile[subport_id];
+
 	/* Derive new subport configuration. */
-	memcpy(&subport_params,
-		&p->soft.tm.params.subport_params[subport_id],
-		sizeof(subport_params));
-	subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;
+	memcpy(&subport_profile,
+		&p->soft.tm.params.subport_profile[subport_profile_id],
+		sizeof(subport_profile));
+	subport_profile.tc_rate[tc_id] = sp_new->params.peak.rate;
 
 	/* Update the subport configuration. */
 	if (rte_sched_subport_config(SCHED(p),
-		subport_id, &subport_params))
+		subport_id, NULL, subport_profile_id))
 		return -1;
 
 	/* Commit changes. */
@@ -1077,9 +1173,9 @@ update_subport_tc_rate(struct rte_eth_dev *dev,
 	ss->shaper_profile_id = sp_new->shaper_profile_id;
 	sp_new->n_users++;
 
-	memcpy(&p->soft.tm.params.subport_params[subport_id],
-		&subport_params,
-		sizeof(subport_params));
+	memcpy(&p->soft.tm.params.subport_profile[subport_profile_id],
+		&subport_profile,
+		sizeof(subport_profile));
 
 	return 0;
 }
@@ -2165,6 +2261,8 @@ pipe_profiles_generate(struct rte_eth_dev *dev)
 		struct rte_sched_pipe_params pp;
 		uint32_t pos;
 
+		memset(&pp, 0, sizeof(pp));
+
 		if (np->level != TM_NODE_LEVEL_PIPE ||
 			np->parent_node_id != ns->node_id)
 			continue;
@@ -2211,10 +2309,11 @@ tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
 #ifdef RTE_SCHED_RED
 
 static void
-wred_profiles_set(struct rte_eth_dev *dev)
+wred_profiles_set(struct rte_eth_dev *dev, uint32_t subport_id)
 {
 	struct pmd_internals *p = dev->data->dev_private;
-	struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
+	struct rte_sched_subport_params *pp =
+		&p->soft.tm.params.subport_params[subport_id];
 	uint32_t tc_id;
 	enum rte_color color;
 
@@ -2234,7 +2333,7 @@ wred_profiles_set(struct rte_eth_dev *dev)
 
 #else
 
-#define wred_profiles_set(dev)
+#define wred_profiles_set(dev, subport_id)
 
 #endif
 
@@ -2269,6 +2368,126 @@ tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
 	return NULL;
 }
 
+static struct rte_sched_subport_profile_params *
+subport_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_params *t = &p->soft.tm.params;
+	uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
+
+	if (subport_id >= TM_MAX_SUBPORT_PROFILE)
+		return NULL;
+
+	return &t->subport_profile[subport_id];
+}
+
+static void
+subport_profile_mark(struct rte_eth_dev *dev,
+	uint32_t subport_id,
+	uint32_t subport_profile_id)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_params *t = &p->soft.tm.params;
+
+	t->subport_to_profile[subport_id] = subport_profile_id;
+}
+
+static void
+subport_profile_install(struct rte_eth_dev *dev,
+	struct rte_sched_subport_profile_params *sp,
+	uint32_t subport_profile_id)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_params *t = &p->soft.tm.params;
+
+	memcpy(&t->subport_profile[subport_profile_id],
+		sp, sizeof(*sp));
+	t->n_subport_profiles++;
+}
+
+static int
+subport_profile_free_exists(struct rte_eth_dev *dev,
+	uint32_t *subport_profile_id)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_params *t = &p->soft.tm.params;
+
+	if (t->n_subport_profiles < TM_MAX_SUBPORT_PROFILE) {
+		*subport_profile_id = t->n_subport_profiles;
+		return 1;
+	}
+
+	return 0;
+}
+
+static void
+subport_profile_build(struct rte_eth_dev *dev, struct tm_node *np,
+	struct rte_sched_subport_profile_params *sp)
+{
+	uint32_t i;
+	memset(sp, 0, sizeof(*sp));
+
+	sp->tb_rate = np->shaper_profile->params.peak.rate;
+	sp->tb_size = np->shaper_profile->params.peak.size;
+
+	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+		struct tm_shared_shaper *ss;
+		struct tm_shaper_profile *ssp;
+
+		ss = tm_subport_tc_shared_shaper_get(dev, np, i);
+		ssp = (ss) ? tm_shaper_profile_search(dev,
+			ss->shaper_profile_id) :
+			np->shaper_profile;
+		sp->tc_rate[i] = ssp->params.peak.rate;
+	}
+
+	/* Traffic Class (TC) */
+	sp->tc_period = SUBPORT_TC_PERIOD;
+}
+
+static int
+subport_profiles_generate(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_hierarchy *h = &p->soft.tm.h;
+	struct tm_node_list *nl = &h->nodes;
+	struct tm_node *ns;
+	uint32_t subport_id;
+
+	/* Objective: Fill in the following fields in struct tm_params:
+	 *    - subport_profiles
+	 *    - n_subport_profiles
+	 *    - subport_to_profile
+	 */
+
+	subport_id = 0;
+	TAILQ_FOREACH(ns, nl, node) {
+		if (ns->level != TM_NODE_LEVEL_SUBPORT)
+			continue;
+
+		struct rte_sched_subport_profile_params sp;
+		uint32_t pos;
+
+		memset(&sp, 0, sizeof(sp));
+
+		subport_profile_build(dev, ns, &sp);
+
+		if (!subport_profile_exists(dev, &sp, &pos)) {
+			if (!subport_profile_free_exists(dev, &pos))
+				return -1;
+
+			subport_profile_install(dev, &sp, pos);
+		}
+
+		subport_profile_mark(dev, subport_id, pos);
+
+		subport_id++;
+	}
+
+	return 0;
+}
+
+
 static int
 hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
 {
@@ -2445,6 +2664,15 @@ hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
 				rte_strerror(EINVAL));
 	}
 
+	/* Not too many subport profiles. */
+	if (subport_profiles_generate(dev))
+		return -rte_tm_error_set(error,
+			EINVAL,
+			RTE_TM_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			rte_strerror(EINVAL));
+
+
 	/* Not too many pipe profiles. */
 	if (pipe_profiles_generate(dev))
 		return -rte_tm_error_set(error,
@@ -2526,70 +2754,42 @@ hierarchy_blueprints_create(struct rte_eth_dev *dev)
 		.frame_overhead =
 			root->shaper_profile->params.pkt_length_adjust,
 		.n_subports_per_port = root->n_children,
-		.n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
-			h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
-		.qsize = {p->params.tm.qsize[0],
-			p->params.tm.qsize[1],
-			p->params.tm.qsize[2],
-			p->params.tm.qsize[3],
-			p->params.tm.qsize[4],
-			p->params.tm.qsize[5],
-			p->params.tm.qsize[6],
-			p->params.tm.qsize[7],
-			p->params.tm.qsize[8],
-			p->params.tm.qsize[9],
-			p->params.tm.qsize[10],
-			p->params.tm.qsize[11],
-			p->params.tm.qsize[12],
-		},
-		.pipe_profiles = t->pipe_profiles,
-		.n_pipe_profiles = t->n_pipe_profiles,
-		.n_max_pipe_profiles = TM_MAX_PIPE_PROFILE,
+		.n_subport_profiles = t->n_subport_profiles,
+		.subport_profiles = t->subport_profile,
+		.n_max_subport_profiles = TM_MAX_SUBPORT_PROFILE,
+		.n_pipes_per_subport = TM_MAX_PIPES_PER_SUBPORT,
 	};
 
-	wred_profiles_set(dev);
-
 	subport_id = 0;
 	TAILQ_FOREACH(n, nl, node) {
-		uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
-		uint32_t i;
-
 		if (n->level != TM_NODE_LEVEL_SUBPORT)
 			continue;
 
-		for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
-			struct tm_shared_shaper *ss;
-			struct tm_shaper_profile *sp;
-
-			ss = tm_subport_tc_shared_shaper_get(dev, n, i);
-			sp = (ss) ? tm_shaper_profile_search(dev,
-				ss->shaper_profile_id) :
-				n->shaper_profile;
-			tc_rate[i] = sp->params.peak.rate;
-		}
-
 		t->subport_params[subport_id] =
 			(struct rte_sched_subport_params) {
-				.tb_rate = n->shaper_profile->params.peak.rate,
-				.tb_size = n->shaper_profile->params.peak.size,
-
-				.tc_rate = {tc_rate[0],
-					tc_rate[1],
-					tc_rate[2],
-					tc_rate[3],
-					tc_rate[4],
-					tc_rate[5],
-					tc_rate[6],
-					tc_rate[7],
-					tc_rate[8],
-					tc_rate[9],
-					tc_rate[10],
-					tc_rate[11],
-					tc_rate[12],
+				.n_pipes_per_subport_enabled =
+					h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
+					h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
+				.qsize = {p->params.tm.qsize[0],
+					p->params.tm.qsize[1],
+					p->params.tm.qsize[2],
+					p->params.tm.qsize[3],
+					p->params.tm.qsize[4],
+					p->params.tm.qsize[5],
+					p->params.tm.qsize[6],
+					p->params.tm.qsize[7],
+					p->params.tm.qsize[8],
+					p->params.tm.qsize[9],
+					p->params.tm.qsize[10],
+					p->params.tm.qsize[11],
+					p->params.tm.qsize[12],
 				},
-				.tc_period = SUBPORT_TC_PERIOD,
+				.pipe_profiles = t->pipe_profiles,
+				.n_pipe_profiles = t->n_pipe_profiles,
+				.n_max_pipe_profiles = TM_MAX_PIPE_PROFILE,
 			};
-
+		wred_profiles_set(dev, subport_id);
 		subport_id++;
 	}
 }
@@ -2827,18 +3027,30 @@ update_subport_rate(struct rte_eth_dev *dev,
 	struct pmd_internals *p = dev->data->dev_private;
 	uint32_t subport_id = tm_node_subport_id(dev, ns);
 
-	struct rte_sched_subport_params subport_params;
+	struct rte_sched_subport_profile_params *profile0 =
+		subport_profile_get(dev, ns);
+	struct rte_sched_subport_profile_params profile1;
+	uint32_t subport_profile_id;
 
-	/* Derive new subport configuration. */
-	memcpy(&subport_params,
-		&p->soft.tm.params.subport_params[subport_id],
-		sizeof(subport_params));
-	subport_params.tb_rate = sp->params.peak.rate;
-	subport_params.tb_size = sp->params.peak.size;
+	if (profile0 == NULL)
+		return -1;
+
+	/* Derive new pipe profile. */
+	memcpy(&profile1, profile0, sizeof(profile1));
+	profile1.tb_rate = sp->params.peak.rate;
+	profile1.tb_size = sp->params.peak.size;
+
+	/* Since implementation does not allow adding more subport profiles
+	 * after port configuration, the pipe configuration can be successfully
+	 * updated only if the new profile is also part of the existing set of
+	 * pipe profiles.
+	 */
+	if (subport_profile_exists(dev, &profile1, &subport_profile_id) == 0)
+		return -1;
 
 	/* Update the subport configuration. */
 	if (rte_sched_subport_config(SCHED(p), subport_id,
-		&subport_params))
+		NULL, subport_profile_id))
 		return -1;
 
 	/* Commit changes. */
@@ -2848,9 +3060,11 @@ update_subport_rate(struct rte_eth_dev *dev,
 	ns->params.shaper_profile_id = sp->shaper_profile_id;
 	sp->n_users++;
 
-	memcpy(&p->soft.tm.params.subport_params[subport_id],
-		&subport_params,
-		sizeof(subport_params));
+	subport_profile_mark(dev, subport_id, subport_profile_id);
+
+	memcpy(&p->soft.tm.params.subport_profile[subport_profile_id],
+		&profile1,
+		sizeof(profile1));
 
 	return 0;
 }
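
---

Note on the API migration shown above: with the DPDK 20.11 rte_sched API, subport
token-bucket and traffic-class rates move out of struct rte_sched_subport_params
into a port-level table of struct rte_sched_subport_profile_params, and
rte_sched_subport_config() gains a profile-index argument. The sketch below is a
minimal illustration of the resulting configuration sequence, not part of the
patch; the helper name sketch_tmgr_create and its parameters are hypothetical,
while the rte_sched calls match the ones used in the diff.

	#include <stddef.h>
	#include <stdint.h>
	#include <rte_sched.h>

	struct rte_sched_port *
	sketch_tmgr_create(struct rte_sched_port_params *pp,
		struct rte_sched_subport_params *subport_params,
		uint32_t *subport_to_profile, /* per-subport profile index */
		uint32_t n_subports)
	{
		struct rte_sched_port *sched;
		uint32_t subport_id;

		/* pp->subport_profiles, pp->n_subport_profiles and
		 * pp->n_max_subport_profiles must already be filled in,
		 * e.g. by a deduplication pass like subport_profiles_generate()
		 * in the patch above.
		 */
		sched = rte_sched_port_config(pp);
		if (sched == NULL)
			return NULL;

		/* Each subport is configured with its own queue/pipe params
		 * plus an index into the port-level profile table.
		 */
		for (subport_id = 0; subport_id < n_subports; subport_id++)
			if (rte_sched_subport_config(sched, subport_id,
					&subport_params[subport_id],
					subport_to_profile[subport_id])) {
				rte_sched_port_free(sched);
				return NULL;
			}

		return sched;
	}

On a later rate change, the patch passes NULL for the subport params and only
swaps the profile index (see update_subport_rate() and update_subport_tc_rate()
above); since no new profiles can be installed after port configuration, the
derived profile must already exist in the table built at hierarchy commit time.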