From: Jasvinder Singh
Date: Mon, 22 Jul 2019 11:01:44 +0000 (+0100)
Subject: net/softnic: add config flexibility to TM
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=5eb676d74fc8a15ed677fba7b7b3f5efb2af5fae;p=dpdk.git

net/softnic: add config flexibility to TM

Update the softnic TM function for configuration flexibility of pipe
traffic classes and queue sizes.

Signed-off-by: Jasvinder Singh
Signed-off-by: Abraham Tovar
Signed-off-by: Lukasz Krakowiak
---

diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index 4bda2f2b02..e3ad241618 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -28,6 +28,16 @@
 #define PMD_PARAM_TM_QSIZE1 "tm_qsize1"
 #define PMD_PARAM_TM_QSIZE2 "tm_qsize2"
 #define PMD_PARAM_TM_QSIZE3 "tm_qsize3"
+#define PMD_PARAM_TM_QSIZE4 "tm_qsize4"
+#define PMD_PARAM_TM_QSIZE5 "tm_qsize5"
+#define PMD_PARAM_TM_QSIZE6 "tm_qsize6"
+#define PMD_PARAM_TM_QSIZE7 "tm_qsize7"
+#define PMD_PARAM_TM_QSIZE8 "tm_qsize8"
+#define PMD_PARAM_TM_QSIZE9 "tm_qsize9"
+#define PMD_PARAM_TM_QSIZE10 "tm_qsize10"
+#define PMD_PARAM_TM_QSIZE11 "tm_qsize11"
+#define PMD_PARAM_TM_QSIZE12 "tm_qsize12"
+
 static const char * const pmd_valid_args[] = {
 	PMD_PARAM_FIRMWARE,
@@ -39,6 +49,15 @@ static const char * const pmd_valid_args[] = {
 	PMD_PARAM_TM_QSIZE1,
 	PMD_PARAM_TM_QSIZE2,
 	PMD_PARAM_TM_QSIZE3,
+	PMD_PARAM_TM_QSIZE4,
+	PMD_PARAM_TM_QSIZE5,
+	PMD_PARAM_TM_QSIZE6,
+	PMD_PARAM_TM_QSIZE7,
+	PMD_PARAM_TM_QSIZE8,
+	PMD_PARAM_TM_QSIZE9,
+	PMD_PARAM_TM_QSIZE10,
+	PMD_PARAM_TM_QSIZE11,
+	PMD_PARAM_TM_QSIZE12,
 	NULL
 };
@@ -434,6 +453,15 @@ pmd_parse_args(struct pmd_params *p, const char *params)
 	p->tm.qsize[1] = SOFTNIC_TM_QUEUE_SIZE;
 	p->tm.qsize[2] = SOFTNIC_TM_QUEUE_SIZE;
 	p->tm.qsize[3] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[4] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[5] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[6] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[7] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[8] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[9] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[10] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[11] = SOFTNIC_TM_QUEUE_SIZE;
+	p->tm.qsize[12] = SOFTNIC_TM_QUEUE_SIZE;
 	/* Firmware script (optional) */
 	if (rte_kvargs_count(kvlist, PMD_PARAM_FIRMWARE) == 1) {
@@ -504,6 +532,67 @@ pmd_parse_args(struct pmd_params *p, const char *params)
 		goto out_free;
 	}
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE4) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE4,
+			&get_uint32, &p->tm.qsize[4]);
+		if (ret < 0)
+			goto out_free;
+	}
+
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE5) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE5,
+			&get_uint32, &p->tm.qsize[5]);
+		if (ret < 0)
+			goto out_free;
+	}
+
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE6) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE6,
+			&get_uint32, &p->tm.qsize[6]);
+		if (ret < 0)
+			goto out_free;
+	}
+
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE7) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE7,
+			&get_uint32, &p->tm.qsize[7]);
+		if (ret < 0)
+			goto out_free;
+	}
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE8) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE8,
+			&get_uint32, &p->tm.qsize[8]);
+		if (ret < 0)
+			goto out_free;
+	}
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE9) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE9,
+			&get_uint32, &p->tm.qsize[9]);
+		if (ret < 0)
+			goto out_free;
+	}
+
+	if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE10) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE10, + &get_uint32, &p->tm.qsize[10]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE11) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE11, + &get_uint32, &p->tm.qsize[11]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE12) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE12, + &get_uint32, &p->tm.qsize[12]); + if (ret < 0) + goto out_free; + } + out_free: rte_kvargs_free(kvlist); return ret; @@ -588,6 +677,15 @@ RTE_PMD_REGISTER_PARAM_STRING(net_softnic, PMD_PARAM_TM_QSIZE1 "= " PMD_PARAM_TM_QSIZE2 "= " PMD_PARAM_TM_QSIZE3 "=" + PMD_PARAM_TM_QSIZE4 "= " + PMD_PARAM_TM_QSIZE5 "= " + PMD_PARAM_TM_QSIZE6 "= " + PMD_PARAM_TM_QSIZE7 "= " + PMD_PARAM_TM_QSIZE8 "= " + PMD_PARAM_TM_QSIZE9 "= " + PMD_PARAM_TM_QSIZE10 "= " + PMD_PARAM_TM_QSIZE11 "=" + PMD_PARAM_TM_QSIZE12 "=" ); diff --git a/drivers/net/softnic/rte_eth_softnic_cli.c b/drivers/net/softnic/rte_eth_softnic_cli.c index 88e0a848ec..bc95f16439 100644 --- a/drivers/net/softnic/rte_eth_softnic_cli.c +++ b/drivers/net/softnic/rte_eth_softnic_cli.c @@ -566,8 +566,7 @@ queue_node_id(uint32_t n_spp __rte_unused, uint32_t tc_id, uint32_t queue_id) { - return queue_id + - tc_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + + return queue_id + tc_id + (pipe_id + subport_id * n_pps) * RTE_SCHED_QUEUES_PER_PIPE; } @@ -617,10 +616,20 @@ tmgr_hierarchy_default(struct pmd_internals *softnic, }, }; + uint32_t *shared_shaper_id = + (uint32_t *)calloc(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, + sizeof(uint32_t)); + + if (shared_shaper_id == NULL) + return -1; + + memcpy(shared_shaper_id, params->shared_shaper_id.tc, + sizeof(params->shared_shaper_id.tc)); + struct rte_tm_node_params tc_node_params[] = { [0] = { .shaper_profile_id = params->shaper_profile_id.tc[0], - .shared_shaper_id = ¶ms->shared_shaper_id.tc[0], + .shared_shaper_id = &shared_shaper_id[0], .n_shared_shapers = (¶ms->shared_shaper_id.tc_valid[0]) ? 1 : 0, .nonleaf = { @@ -630,7 +639,7 @@ tmgr_hierarchy_default(struct pmd_internals *softnic, [1] = { .shaper_profile_id = params->shaper_profile_id.tc[1], - .shared_shaper_id = ¶ms->shared_shaper_id.tc[1], + .shared_shaper_id = &shared_shaper_id[1], .n_shared_shapers = (¶ms->shared_shaper_id.tc_valid[1]) ? 1 : 0, .nonleaf = { @@ -640,7 +649,7 @@ tmgr_hierarchy_default(struct pmd_internals *softnic, [2] = { .shaper_profile_id = params->shaper_profile_id.tc[2], - .shared_shaper_id = ¶ms->shared_shaper_id.tc[2], + .shared_shaper_id = &shared_shaper_id[2], .n_shared_shapers = (¶ms->shared_shaper_id.tc_valid[2]) ? 1 : 0, .nonleaf = { @@ -650,13 +659,103 @@ tmgr_hierarchy_default(struct pmd_internals *softnic, [3] = { .shaper_profile_id = params->shaper_profile_id.tc[3], - .shared_shaper_id = ¶ms->shared_shaper_id.tc[3], + .shared_shaper_id = &shared_shaper_id[3], .n_shared_shapers = (¶ms->shared_shaper_id.tc_valid[3]) ? 1 : 0, .nonleaf = { .n_sp_priorities = 1, }, }, + + [4] = { + .shaper_profile_id = params->shaper_profile_id.tc[4], + .shared_shaper_id = &shared_shaper_id[4], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[4]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [5] = { + .shaper_profile_id = params->shaper_profile_id.tc[5], + .shared_shaper_id = &shared_shaper_id[5], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[5]) ? 
1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [6] = { + .shaper_profile_id = params->shaper_profile_id.tc[6], + .shared_shaper_id = &shared_shaper_id[6], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[6]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [7] = { + .shaper_profile_id = params->shaper_profile_id.tc[7], + .shared_shaper_id = &shared_shaper_id[7], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[7]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [8] = { + .shaper_profile_id = params->shaper_profile_id.tc[8], + .shared_shaper_id = &shared_shaper_id[8], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[8]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [9] = { + .shaper_profile_id = params->shaper_profile_id.tc[9], + .shared_shaper_id = &shared_shaper_id[9], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[9]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [10] = { + .shaper_profile_id = params->shaper_profile_id.tc[10], + .shared_shaper_id = &shared_shaper_id[10], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[10]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [11] = { + .shaper_profile_id = params->shaper_profile_id.tc[11], + .shared_shaper_id = &shared_shaper_id[11], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[11]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [12] = { + .shaper_profile_id = params->shaper_profile_id.tc[12], + .shared_shaper_id = &shared_shaper_id[12], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[12]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, }; struct rte_tm_node_params queue_node_params = { @@ -730,7 +829,24 @@ tmgr_hierarchy_default(struct pmd_internals *softnic, return -1; /* Hierarchy level 4: Queue nodes */ - for (q = 0; q < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; q++) { + if (t < RTE_SCHED_TRAFFIC_CLASS_BE) { + /* Strict-priority traffic class queues */ + q = 0; + status = rte_tm_node_add(port_id, + queue_node_id(n_spp, n_pps, s, p, t, q), + tc_node_id(n_spp, n_pps, s, p, t), + 0, + params->weight.queue[q], + RTE_TM_NODE_LEVEL_ID_ANY, + &queue_node_params, + &error); + if (status) + return -1; + + continue; + } + /* Best-effort traffic class queues */ + for (q = 0; q < RTE_SCHED_BE_QUEUES_PER_PIPE; q++) { status = rte_tm_node_add(port_id, queue_node_id(n_spp, n_pps, s, p, t, q), tc_node_id(n_spp, n_pps, s, p, t), @@ -741,7 +857,7 @@ tmgr_hierarchy_default(struct pmd_internals *softnic, &error); if (status) return -1; - } /* Queue */ + } } /* TC */ } /* Pipe */ } /* Subport */ @@ -762,13 +878,31 @@ tmgr_hierarchy_default(struct pmd_internals *softnic, * tc1 * tc2 * tc3 + * tc4 + * tc5 + * tc6 + * tc7 + * tc8 + * tc9 + * tc10 + * tc11 + * tc12 * shared shaper * tc0 * tc1 * tc2 * tc3 + * tc4 + * tc5 + * tc6 + * tc7 + * tc8 + * tc9 + * tc10 + * tc11 + * tc12 * weight - * queue ... + * queue ... 
*/ static void cmd_tmgr_hierarchy_default(struct pmd_internals *softnic, @@ -778,11 +912,11 @@ cmd_tmgr_hierarchy_default(struct pmd_internals *softnic, size_t out_size) { struct tmgr_hierarchy_default_params p; - int i, status; + int i, j, status; memset(&p, 0, sizeof(p)); - if (n_tokens != 50) { + if (n_tokens != 74) { snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); return; } @@ -894,27 +1028,118 @@ cmd_tmgr_hierarchy_default(struct pmd_internals *softnic, return; } + if (strcmp(tokens[22], "tc4") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc4"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[4], tokens[23]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc4 profile id"); + return; + } + + if (strcmp(tokens[24], "tc5") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc5"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[5], tokens[25]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc5 profile id"); + return; + } + + if (strcmp(tokens[26], "tc6") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc6"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[6], tokens[27]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc6 profile id"); + return; + } + + if (strcmp(tokens[28], "tc7") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc7"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[7], tokens[29]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc7 profile id"); + return; + } + + if (strcmp(tokens[30], "tc8") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc8"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[8], tokens[31]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc8 profile id"); + return; + } + + if (strcmp(tokens[32], "tc9") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc9"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[9], tokens[33]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc9 profile id"); + return; + } + + if (strcmp(tokens[34], "tc10") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc10"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[10], tokens[35]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc10 profile id"); + return; + } + + if (strcmp(tokens[36], "tc11") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc11"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[11], tokens[37]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc11 profile id"); + return; + } + + if (strcmp(tokens[38], "tc12") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc12"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[12], tokens[39]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc12 profile id"); + return; + } + /* Shared shaper */ - if (strcmp(tokens[22], "shared") != 0) { + if (strcmp(tokens[40], "shared") != 0) { snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shared"); return; } - if (strcmp(tokens[23], "shaper") != 0) { + if (strcmp(tokens[41], "shaper") != 0) { snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper"); return; } - if (strcmp(tokens[24], "tc0") != 0) { + if (strcmp(tokens[42], "tc0") != 0) { snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc0"); return; } - if (strcmp(tokens[25], "none") == 0) + if (strcmp(tokens[43], "none") == 0) p.shared_shaper_id.tc_valid[0] = 0; else { - if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[0], 
tokens[25]) != 0) { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[0], + tokens[43]) != 0) { snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc0"); return; } @@ -922,15 +1147,16 @@ cmd_tmgr_hierarchy_default(struct pmd_internals *softnic, p.shared_shaper_id.tc_valid[0] = 1; } - if (strcmp(tokens[26], "tc1") != 0) { + if (strcmp(tokens[44], "tc1") != 0) { snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc1"); return; } - if (strcmp(tokens[27], "none") == 0) + if (strcmp(tokens[45], "none") == 0) p.shared_shaper_id.tc_valid[1] = 0; else { - if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[1], tokens[27]) != 0) { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[1], + tokens[45]) != 0) { snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc1"); return; } @@ -938,15 +1164,16 @@ cmd_tmgr_hierarchy_default(struct pmd_internals *softnic, p.shared_shaper_id.tc_valid[1] = 1; } - if (strcmp(tokens[28], "tc2") != 0) { + if (strcmp(tokens[46], "tc2") != 0) { snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc2"); return; } - if (strcmp(tokens[29], "none") == 0) + if (strcmp(tokens[47], "none") == 0) p.shared_shaper_id.tc_valid[2] = 0; else { - if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[2], tokens[29]) != 0) { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[2], + tokens[47]) != 0) { snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc2"); return; } @@ -954,15 +1181,16 @@ cmd_tmgr_hierarchy_default(struct pmd_internals *softnic, p.shared_shaper_id.tc_valid[2] = 1; } - if (strcmp(tokens[30], "tc3") != 0) { + if (strcmp(tokens[48], "tc3") != 0) { snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc3"); return; } - if (strcmp(tokens[31], "none") == 0) + if (strcmp(tokens[49], "none") == 0) p.shared_shaper_id.tc_valid[3] = 0; else { - if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[3], tokens[31]) != 0) { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[3], + tokens[49]) != 0) { snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc3"); return; } @@ -970,22 +1198,181 @@ cmd_tmgr_hierarchy_default(struct pmd_internals *softnic, p.shared_shaper_id.tc_valid[3] = 1; } + if (strcmp(tokens[50], "tc4") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc4"); + return; + } + + if (strcmp(tokens[51], "none") == 0) { + p.shared_shaper_id.tc_valid[4] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[4], + tokens[51]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc4"); + return; + } + + p.shared_shaper_id.tc_valid[4] = 1; + } + + if (strcmp(tokens[52], "tc5") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc5"); + return; + } + + if (strcmp(tokens[53], "none") == 0) { + p.shared_shaper_id.tc_valid[5] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[5], + tokens[53]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc5"); + return; + } + + p.shared_shaper_id.tc_valid[5] = 1; + } + + if (strcmp(tokens[54], "tc6") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc6"); + return; + } + + if (strcmp(tokens[55], "none") == 0) { + p.shared_shaper_id.tc_valid[6] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[6], + tokens[55]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc6"); + return; + } + + p.shared_shaper_id.tc_valid[6] = 1; + } + + if (strcmp(tokens[56], "tc7") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc7"); + return; + } + + if (strcmp(tokens[57], "none") == 0) { + p.shared_shaper_id.tc_valid[7] = 0; + } 
else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[7], + tokens[57]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc7"); + return; + } + + p.shared_shaper_id.tc_valid[7] = 1; + } + + if (strcmp(tokens[58], "tc8") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc8"); + return; + } + + if (strcmp(tokens[59], "none") == 0) { + p.shared_shaper_id.tc_valid[8] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[8], + tokens[59]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc8"); + return; + } + + p.shared_shaper_id.tc_valid[8] = 1; + } + + if (strcmp(tokens[60], "tc9") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc9"); + return; + } + + if (strcmp(tokens[61], "none") == 0) { + p.shared_shaper_id.tc_valid[9] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[9], + tokens[61]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc9"); + return; + } + + p.shared_shaper_id.tc_valid[9] = 1; + } + + if (strcmp(tokens[62], "tc10") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc10"); + return; + } + + if (strcmp(tokens[63], "none") == 0) { + p.shared_shaper_id.tc_valid[10] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[10], + tokens[63]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc10"); + return; + } + + p.shared_shaper_id.tc_valid[10] = 1; + } + + if (strcmp(tokens[64], "tc11") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc11"); + return; + } + + if (strcmp(tokens[65], "none") == 0) { + p.shared_shaper_id.tc_valid[11] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[11], + tokens[65]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc11"); + return; + } + + p.shared_shaper_id.tc_valid[11] = 1; + } + + if (strcmp(tokens[66], "tc12") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc12"); + return; + } + + if (strcmp(tokens[67], "none") == 0) { + p.shared_shaper_id.tc_valid[12] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[12], + tokens[67]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc12"); + return; + } + + p.shared_shaper_id.tc_valid[12] = 1; + } + /* Weight */ - if (strcmp(tokens[32], "weight") != 0) { + if (strcmp(tokens[68], "weight") != 0) { snprintf(out, out_size, MSG_ARG_NOT_FOUND, "weight"); return; } - if (strcmp(tokens[33], "queue") != 0) { + if (strcmp(tokens[69], "queue") != 0) { snprintf(out, out_size, MSG_ARG_NOT_FOUND, "queue"); return; } - for (i = 0; i < 16; i++) { - if (softnic_parser_read_uint32(&p.weight.queue[i], tokens[34 + i]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "weight queue"); - return; + for (i = 0, j = 0; i < 16; i++) { + if (i < RTE_SCHED_TRAFFIC_CLASS_BE) { + p.weight.queue[i] = 1; + } else { + if (softnic_parser_read_uint32(&p.weight.queue[i], + tokens[70 + j]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "weight queue"); + return; + } + j++; } } diff --git a/drivers/net/softnic/rte_eth_softnic_internals.h b/drivers/net/softnic/rte_eth_softnic_internals.h index 08bc660510..6eec43b22b 100644 --- a/drivers/net/softnic/rte_eth_softnic_internals.h +++ b/drivers/net/softnic/rte_eth_softnic_internals.h @@ -161,13 +161,15 @@ TAILQ_HEAD(softnic_link_list, softnic_link); #define TM_MAX_PIPES_PER_SUBPORT 4096 #endif +#ifndef TM_MAX_PIPE_PROFILE +#define TM_MAX_PIPE_PROFILE 256 +#endif struct tm_params { struct rte_sched_port_params port_params; struct rte_sched_subport_params 
subport_params[TM_MAX_SUBPORTS]; - struct rte_sched_pipe_params - pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT]; + struct rte_sched_pipe_params pipe_profiles[TM_MAX_PIPE_PROFILE]; uint32_t n_pipe_profiles; uint32_t pipe_to_profile[TM_MAX_SUBPORTS * TM_MAX_PIPES_PER_SUBPORT]; }; diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c index 58744a9eb8..61c3adc827 100644 --- a/drivers/net/softnic/rte_eth_softnic_tm.c +++ b/drivers/net/softnic/rte_eth_softnic_tm.c @@ -367,7 +367,9 @@ tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level) { struct pmd_internals *p = dev->data->dev_private; uint32_t n_queues_max = p->params.tm.n_queues; - uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; + uint32_t n_tc_max = + (n_queues_max * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) + / RTE_SCHED_QUEUES_PER_PIPE; uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; uint32_t n_subports_max = n_pipes_max; uint32_t n_root_max = 1; @@ -625,10 +627,10 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = { .shaper_shared_n_max = 1, .sched_n_children_max = - RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS, + RTE_SCHED_BE_QUEUES_PER_PIPE, .sched_sp_n_priorities_max = 1, .sched_wfq_n_children_per_group_max = - RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS, + RTE_SCHED_BE_QUEUES_PER_PIPE, .sched_wfq_n_groups_max = 1, .sched_wfq_weight_max = UINT32_MAX, @@ -793,10 +795,10 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = { {.nonleaf = { .sched_n_children_max = - RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS, + RTE_SCHED_BE_QUEUES_PER_PIPE, .sched_sp_n_priorities_max = 1, .sched_wfq_n_children_per_group_max = - RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS, + RTE_SCHED_BE_QUEUES_PER_PIPE, .sched_wfq_n_groups_max = 1, .sched_wfq_weight_max = UINT32_MAX, } }, @@ -2027,9 +2029,7 @@ pipe_profile_build(struct rte_eth_dev *dev, /* Traffic Class (TC) */ pp->tc_period = PIPE_TC_PERIOD; -#ifdef RTE_SCHED_SUBPORT_TC_OV pp->tc_ov_weight = np->weight; -#endif TAILQ_FOREACH(nt, nl, node) { uint32_t queue_id = 0; @@ -2043,15 +2043,13 @@ pipe_profile_build(struct rte_eth_dev *dev, /* Queue */ TAILQ_FOREACH(nq, nl, node) { - uint32_t pipe_queue_id; if (nq->level != TM_NODE_LEVEL_QUEUE || nq->parent_node_id != nt->node_id) continue; - pipe_queue_id = nt->priority * - RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id; - pp->wrr_weights[pipe_queue_id] = nq->weight; + if (nt->priority == RTE_SCHED_TRAFFIC_CLASS_BE) + pp->wrr_weights[queue_id] = nq->weight; queue_id++; } @@ -2065,7 +2063,7 @@ pipe_profile_free_exists(struct rte_eth_dev *dev, struct pmd_internals *p = dev->data->dev_private; struct tm_params *t = &p->soft.tm.params; - if (t->n_pipe_profiles < RTE_SCHED_PIPE_PROFILES_PER_PORT) { + if (t->n_pipe_profiles < TM_MAX_PIPE_PROFILE) { *pipe_profile_id = t->n_pipe_profiles; return 1; } @@ -2217,6 +2215,7 @@ wred_profiles_set(struct rte_eth_dev *dev) { struct pmd_internals *p = dev->data->dev_private; struct rte_sched_port_params *pp = &p->soft.tm.params.port_params; + uint32_t tc_id; enum rte_color color; @@ -2332,7 +2331,7 @@ hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error) rte_strerror(EINVAL)); } - /* Each pipe has exactly 4 TCs, with exactly one TC for each priority */ + /* Each pipe has exactly 13 TCs, with exactly one TC for each priority */ TAILQ_FOREACH(np, nl, node) { uint32_t mask = 0, mask_expected = RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, @@ -2364,12 +2363,14 @@ hierarchy_commit_check(struct rte_eth_dev *dev, struct 
rte_tm_error *error) rte_strerror(EINVAL)); } - /* Each TC has exactly 4 packet queues. */ + /** Each Strict priority TC has exactly 1 packet queues while + * lowest priority TC (Best-effort) has 4 queues. + */ TAILQ_FOREACH(nt, nl, node) { if (nt->level != TM_NODE_LEVEL_TC) continue; - if (nt->n_children != RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS) + if (nt->n_children != 1 && nt->n_children != RTE_SCHED_BE_QUEUES_PER_PIPE) return -rte_tm_error_set(error, EINVAL, RTE_TM_ERROR_TYPE_UNSPECIFIED, @@ -2531,9 +2532,19 @@ hierarchy_blueprints_create(struct rte_eth_dev *dev) p->params.tm.qsize[1], p->params.tm.qsize[2], p->params.tm.qsize[3], + p->params.tm.qsize[4], + p->params.tm.qsize[5], + p->params.tm.qsize[6], + p->params.tm.qsize[7], + p->params.tm.qsize[8], + p->params.tm.qsize[9], + p->params.tm.qsize[10], + p->params.tm.qsize[11], + p->params.tm.qsize[12], }, .pipe_profiles = t->pipe_profiles, .n_pipe_profiles = t->n_pipe_profiles, + .n_max_pipe_profiles = TM_MAX_PIPE_PROFILE, }; wred_profiles_set(dev); @@ -2566,8 +2577,17 @@ hierarchy_blueprints_create(struct rte_eth_dev *dev) tc_rate[1], tc_rate[2], tc_rate[3], - }, - .tc_period = SUBPORT_TC_PERIOD, + tc_rate[4], + tc_rate[5], + tc_rate[6], + tc_rate[7], + tc_rate[8], + tc_rate[9], + tc_rate[10], + tc_rate[11], + tc_rate[12], + }, + .tc_period = SUBPORT_TC_PERIOD, }; subport_id++; @@ -2657,7 +2677,6 @@ update_queue_weight(struct rte_eth_dev *dev, uint32_t queue_id = tm_node_queue_id(dev, nq); struct tm_node *nt = nq->parent_node; - uint32_t tc_id = tm_node_tc_id(dev, nt); struct tm_node *np = nt->parent_node; uint32_t pipe_id = tm_node_pipe_id(dev, np); @@ -2665,8 +2684,8 @@ update_queue_weight(struct rte_eth_dev *dev, struct tm_node *ns = np->parent_node; uint32_t subport_id = tm_node_subport_id(dev, ns); - uint32_t pipe_queue_id = - tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id; + uint32_t pipe_be_queue_id = + queue_id - RTE_SCHED_TRAFFIC_CLASS_BE; struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np); struct rte_sched_pipe_params profile1; @@ -2674,7 +2693,7 @@ update_queue_weight(struct rte_eth_dev *dev, /* Derive new pipe profile. 
*/ memcpy(&profile1, profile0, sizeof(profile1)); - profile1.wrr_weights[pipe_queue_id] = (uint8_t)weight; + profile1.wrr_weights[pipe_be_queue_id] = (uint8_t)weight; /* Since implementation does not allow adding more pipe profiles after * port configuration, the pipe configuration can be successfully @@ -3020,10 +3039,9 @@ tm_port_queue_id(struct rte_eth_dev *dev, uint32_t port_pipe_id = port_subport_id * n_pipes_per_subport + subport_pipe_id; - uint32_t port_tc_id = - port_pipe_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + pipe_tc_id; + uint32_t port_queue_id = - port_tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_queue_id; + port_pipe_id * RTE_SCHED_QUEUES_PER_PIPE + pipe_tc_id + tc_queue_id; return port_queue_id; } @@ -3138,7 +3156,7 @@ read_pipe_stats(struct rte_eth_dev *dev, struct tm_node *ns = np->parent_node; uint32_t subport_id = tm_node_subport_id(dev, ns); - + uint32_t tc_id, queue_id; uint32_t i; /* Stats read */ @@ -3146,11 +3164,19 @@ read_pipe_stats(struct rte_eth_dev *dev, struct rte_sched_queue_stats s; uint16_t qlen; + if (i < RTE_SCHED_TRAFFIC_CLASS_BE) { + tc_id = i; + queue_id = i; + } else { + tc_id = RTE_SCHED_TRAFFIC_CLASS_BE; + queue_id = i - tc_id; + } + uint32_t qid = tm_port_queue_id(dev, subport_id, pipe_id, - i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS, - i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS); + tc_id, + queue_id); int status = rte_sched_queue_read_stats(SCHED(p), qid, @@ -3198,21 +3224,20 @@ read_tc_stats(struct rte_eth_dev *dev, struct tm_node *ns = np->parent_node; uint32_t subport_id = tm_node_subport_id(dev, ns); - - uint32_t i; + struct rte_sched_queue_stats s; + uint32_t qid, i; + uint16_t qlen; + int status; /* Stats read */ - for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) { - struct rte_sched_queue_stats s; - uint16_t qlen; - - uint32_t qid = tm_port_queue_id(dev, + if (tc_id < RTE_SCHED_TRAFFIC_CLASS_BE) { + qid = tm_port_queue_id(dev, subport_id, pipe_id, tc_id, - i); + 0); - int status = rte_sched_queue_read_stats(SCHED(p), + status = rte_sched_queue_read_stats(SCHED(p), qid, &s, &qlen); @@ -3226,6 +3251,30 @@ read_tc_stats(struct rte_eth_dev *dev, nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] += s.n_bytes_dropped; nt->stats.leaf.n_pkts_queued = qlen; + } else { + for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) { + qid = tm_port_queue_id(dev, + subport_id, + pipe_id, + tc_id, + i); + + status = rte_sched_queue_read_stats(SCHED(p), + qid, + &s, + &qlen); + if (status) + return status; + + /* Stats accumulate */ + nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped; + nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped; + nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += + s.n_pkts_dropped; + nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] += + s.n_bytes_dropped; + nt->stats.leaf.n_pkts_queued = qlen; + } } /* Stats copy */
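
The hierarchy changes above assume the updated sched layout of 13 traffic classes per pipe: TCs 0..11 are strict priority with a single queue each, and the last TC (best effort) owns the remaining four queues, for 16 queues per pipe. As a minimal sketch (illustrative only, not part of the patch), the flat per-pipe queue numbering used by queue_node_id(), tm_port_queue_id() and the wrr_weights[] indexing can be written as follows; the local constants mirror RTE_SCHED_QUEUES_PER_PIPE and RTE_SCHED_TRAFFIC_CLASS_BE from the sched library:

/* Illustrative sketch only; not part of the patch above. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define QUEUES_PER_PIPE   16 /* mirrors RTE_SCHED_QUEUES_PER_PIPE */
#define TRAFFIC_CLASS_BE  12 /* mirrors RTE_SCHED_TRAFFIC_CLASS_BE */

static uint32_t
pipe_queue_index(uint32_t tc_id, uint32_t queue_id)
{
	/* Strict-priority TCs (0..11) have one queue, so queue_id is 0 and the
	 * index equals tc_id; best-effort queues 0..3 land at indices 12..15.
	 * This is the "queue_id + tc_id" term used in queue_node_id() above. */
	return tc_id + queue_id;
}

int
main(void)
{
	printf("TC 3, its only queue -> %" PRIu32 "\n", pipe_queue_index(3, 0));
	printf("BE TC, queue 2       -> %" PRIu32 "\n",
	       pipe_queue_index(TRAFFIC_CLASS_BE, 2));
	printf("queues per pipe      -> %d\n", QUEUES_PER_PIPE);
	return 0;
}

On the device-argument side, the nine new parameters follow the same key/value form as the existing ones, e.g. tm_qsize4=<size> through tm_qsize12=<size> alongside tm_qsize0..tm_qsize3.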