diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c
index 8da8310..58744a9 100644
--- a/drivers/net/softnic/rte_eth_softnic_tm.c
+++ b/drivers/net/softnic/rte_eth_softnic_tm.c
 #include <string.h>
 
 #include <rte_malloc.h>
+#include <rte_string_fns.h>
 
 #include "rte_eth_softnic_internals.h"
 #include "rte_eth_softnic.h"
 
-#define BYTES_IN_MBPS          (1000 * 1000 / 8)
 #define SUBPORT_TC_PERIOD      10
 #define PIPE_TC_PERIOD         40
 
-static void
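+/* Initialize the per-device list of traffic manager (tmgr) ports. */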
+int
+softnic_tmgr_init(struct pmd_internals *p)
+{
+       TAILQ_INIT(&p->tmgr_port_list);
+
+       return 0;
+}
+
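+/* Drain the tmgr port list, freeing each entry and its rte_sched port. */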
+void
+softnic_tmgr_free(struct pmd_internals *p)
+{
+       for ( ; ; ) {
+               struct softnic_tmgr_port *tmgr_port;
+
+               tmgr_port = TAILQ_FIRST(&p->tmgr_port_list);
+               if (tmgr_port == NULL)
+                       break;
+
+               TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
+               rte_sched_port_free(tmgr_port->s);
+               free(tmgr_port);
+       }
+}
+
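+/* Look up a tmgr port by name; returns NULL if the name is NULL or unknown. */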
+struct softnic_tmgr_port *
+softnic_tmgr_port_find(struct pmd_internals *p,
+       const char *name)
+{
+       struct softnic_tmgr_port *tmgr_port;
+
+       if (name == NULL)
+               return NULL;
+
+       TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node)
+               if (strcmp(tmgr_port->name, name) == 0)
+                       return tmgr_port;
+
+       return NULL;
+}
+
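+/*
+ * Instantiate an rte_sched port from the frozen TM hierarchy (port, subport
+ * and pipe configuration), then register it in the tmgr port list under the
+ * given name. Returns NULL if the name is already taken, the hierarchy has
+ * not been committed yet, or any rte_sched configuration step fails.
+ */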
+struct softnic_tmgr_port *
+softnic_tmgr_port_create(struct pmd_internals *p,
+       const char *name)
+{
+       struct softnic_tmgr_port *tmgr_port;
+       struct tm_params *t = &p->soft.tm.params;
+       struct rte_sched_port *sched;
+       uint32_t n_subports, subport_id;
+
+       /* Check input params */
+       if (name == NULL ||
+               softnic_tmgr_port_find(p, name))
+               return NULL;
+
+       /*
+        * Resource
+        */
+
+       /* Is hierarchy frozen? */
+       if (p->soft.tm.hierarchy_frozen == 0)
+               return NULL;
+
+       /* Port */
+       sched = rte_sched_port_config(&t->port_params);
+       if (sched == NULL)
+               return NULL;
+
+       /* Subport */
+       n_subports = t->port_params.n_subports_per_port;
+       for (subport_id = 0; subport_id < n_subports; subport_id++) {
+               uint32_t n_pipes_per_subport = t->port_params.n_pipes_per_subport;
+               uint32_t pipe_id;
+               int status;
+
+               status = rte_sched_subport_config(sched,
+                       subport_id,
+                       &t->subport_params[subport_id]);
+               if (status) {
+                       rte_sched_port_free(sched);
+                       return NULL;
+               }
+
+               /* Pipe */
+               for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
+                       int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id;
+                       int profile_id = t->pipe_to_profile[pos];
+
+                       if (profile_id < 0)
+                               continue;
+
+                       status = rte_sched_pipe_config(sched,
+                               subport_id,
+                               pipe_id,
+                               profile_id);
+                       if (status) {
+                               rte_sched_port_free(sched);
+                               return NULL;
+                       }
+               }
+       }
+
+       /* Node allocation */
+       tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port));
+       if (tmgr_port == NULL) {
+               rte_sched_port_free(sched);
+               return NULL;
+       }
+
+       /* Node fill in */
+       strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
+       tmgr_port->s = sched;
+
+       /* Node add to list */
+       TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node);
+
+       return tmgr_port;
+}
+
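+/*
+ * Shorthand for the rte_sched port backing the data path: the tmgr port
+ * registered under the hard-coded name "TMGR", or NULL if it does not exist.
+ */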
+static struct rte_sched_port *
+SCHED(struct pmd_internals *p)
+{
+       struct softnic_tmgr_port *tmgr_port;
+
+       tmgr_port = softnic_tmgr_port_find(p, "TMGR");
+       if (tmgr_port == NULL)
+               return NULL;
+
+       return tmgr_port->s;
+}
+
+void
 tm_hierarchy_init(struct pmd_internals *p)
 {
-       memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
+       memset(&p->soft.tm, 0, sizeof(p->soft.tm));
 
        /* Initialize shaper profile list */
        TAILQ_INIT(&p->soft.tm.h.shaper_profiles);
@@ -33,8 +163,8 @@ tm_hierarchy_init(struct pmd_internals *p)
        TAILQ_INIT(&p->soft.tm.h.nodes);
 }
 
-static void
-tm_hierarchy_uninit(struct pmd_internals *p)
+void
+tm_hierarchy_free(struct pmd_internals *p)
 {
        /* Remove all nodes*/
        for ( ; ; ) {
@@ -85,88 +215,7 @@ tm_hierarchy_uninit(struct pmd_internals *p)
                free(shaper_profile);
        }
 
-       memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
-}
-
-int
-tm_init(struct pmd_internals *p,
-       struct pmd_params *params __rte_unused,
-       int numa_node __rte_unused)
-{
        tm_hierarchy_init(p);
-
-       return 0;
-}
-
-void
-tm_free(struct pmd_internals *p)
-{
-       tm_hierarchy_uninit(p);
-}
-
-int
-tm_start(struct pmd_internals *p)
-{
-       struct tm_params *t = &p->soft.tm.params;
-       uint32_t n_subports, subport_id;
-       int status;
-
-       /* Is hierarchy frozen? */
-       if (p->soft.tm.hierarchy_frozen == 0)
-               return -1;
-
-       /* Port */
-       p->soft.tm.sched = rte_sched_port_config(&t->port_params);
-       if (p->soft.tm.sched == NULL)
-               return -1;
-
-       /* Subport */
-       n_subports = t->port_params.n_subports_per_port;
-       for (subport_id = 0; subport_id < n_subports; subport_id++) {
-               uint32_t n_pipes_per_subport =
-                       t->port_params.n_pipes_per_subport;
-               uint32_t pipe_id;
-
-               status = rte_sched_subport_config(p->soft.tm.sched,
-                       subport_id,
-                       &t->subport_params[subport_id]);
-               if (status) {
-                       rte_sched_port_free(p->soft.tm.sched);
-                       return -1;
-               }
-
-               /* Pipe */
-               n_pipes_per_subport = t->port_params.n_pipes_per_subport;
-               for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
-                       int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
-                               pipe_id;
-                       int profile_id = t->pipe_to_profile[pos];
-
-                       if (profile_id < 0)
-                               continue;
-
-                       status = rte_sched_pipe_config(p->soft.tm.sched,
-                               subport_id,
-                               pipe_id,
-                               profile_id);
-                       if (status) {
-                               rte_sched_port_free(p->soft.tm.sched);
-                               return -1;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-void
-tm_stop(struct pmd_internals *p)
-{
-       if (p->soft.tm.sched)
-               rte_sched_port_free(p->soft.tm.sched);
-
-       /* Unfreeze hierarchy */
-       p->soft.tm.hierarchy_frozen = 0;
 }
 
 static struct tm_shaper_profile *
@@ -1016,7 +1065,7 @@ update_subport_tc_rate(struct rte_eth_dev *dev,
        subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;
 
        /* Update the subport configuration. */
-       if (rte_sched_subport_config(p->soft.tm.sched,
+       if (rte_sched_subport_config(SCHED(p),
                subport_id, &subport_params))
                return -1;
 
@@ -1155,7 +1204,7 @@ wred_profile_check(struct rte_eth_dev *dev,
        struct rte_tm_error *error)
 {
        struct tm_wred_profile *wp;
-       enum rte_tm_color color;
+       enum rte_color color;
 
        /* WRED profile ID must not be NONE. */
        if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
@@ -1191,7 +1240,7 @@ wred_profile_check(struct rte_eth_dev *dev,
                         rte_strerror(ENOTSUP));
 
        /* min_th <= max_th, max_th > 0  */
-       for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
+       for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
                uint32_t min_th = profile->red_params[color].min_th;
                uint32_t max_th = profile->red_params[color].max_th;
 
@@ -2169,10 +2218,10 @@ wred_profiles_set(struct rte_eth_dev *dev)
        struct pmd_internals *p = dev->data->dev_private;
        struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
        uint32_t tc_id;
-       enum rte_tm_color color;
+       enum rte_color color;
 
        for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
-               for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
+               for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
                        struct rte_red_params *dst =
                                &pp->red_params[tc_id][color];
                        struct tm_wred_profile *src_wp =
@@ -2544,10 +2593,8 @@ pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
 
        status = hierarchy_commit_check(dev, error);
        if (status) {
-               if (clear_on_fail) {
-                       tm_hierarchy_uninit(p);
-                       tm_hierarchy_init(p);
-               }
+               if (clear_on_fail)
+                       tm_hierarchy_free(p);
 
                return status;
        }
@@ -2589,7 +2636,7 @@ update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
                return -1;
 
        /* Update the pipe profile used by the current pipe. */
-       if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+       if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
                (int32_t)pipe_profile_id))
                return -1;
 
@@ -2638,7 +2685,7 @@ update_queue_weight(struct rte_eth_dev *dev,
                return -1;
 
        /* Update the pipe profile used by the current pipe. */
-       if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+       if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
                (int32_t)pipe_profile_id))
                return -1;
 
@@ -2771,7 +2818,7 @@ update_subport_rate(struct rte_eth_dev *dev,
        subport_params.tb_size = sp->params.peak.size;
 
        /* Update the subport configuration. */
-       if (rte_sched_subport_config(p->soft.tm.sched, subport_id,
+       if (rte_sched_subport_config(SCHED(p), subport_id,
                &subport_params))
                return -1;
 
@@ -2818,7 +2865,7 @@ update_pipe_rate(struct rte_eth_dev *dev,
                return -1;
 
        /* Update the pipe profile used by the current pipe. */
-       if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+       if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
                (int32_t)pipe_profile_id))
                return -1;
 
@@ -2863,7 +2910,7 @@ update_tc_rate(struct rte_eth_dev *dev,
                return -1;
 
        /* Update the pipe profile used by the current pipe. */
-       if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+       if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
                (int32_t)pipe_profile_id))
                return -1;
 
@@ -2998,8 +3045,7 @@ read_port_stats(struct rte_eth_dev *dev,
                uint32_t tc_ov, id;
 
                /* Stats read */
-               int status = rte_sched_subport_read_stats(
-                       p->soft.tm.sched,
+               int status = rte_sched_subport_read_stats(SCHED(p),
                        subport_id,
                        &s,
                        &tc_ov);
@@ -3012,9 +3058,9 @@ read_port_stats(struct rte_eth_dev *dev,
                                s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
                        nr->stats.n_bytes +=
                                s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
-                       nr->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
+                       nr->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
                                s.n_pkts_tc_dropped[id];
-                       nr->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+                       nr->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
                                s.n_bytes_tc_dropped[id];
                }
        }
@@ -3046,8 +3092,7 @@ read_subport_stats(struct rte_eth_dev *dev,
        uint32_t tc_ov, tc_id;
 
        /* Stats read */
-       int status = rte_sched_subport_read_stats(
-               p->soft.tm.sched,
+       int status = rte_sched_subport_read_stats(SCHED(p),
                subport_id,
                &s,
                &tc_ov);
@@ -3060,9 +3105,9 @@ read_subport_stats(struct rte_eth_dev *dev,
                        s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
                ns->stats.n_bytes +=
                        s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
-               ns->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
+               ns->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] +=
                        s.n_pkts_tc_dropped[tc_id];
-               ns->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+               ns->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
                        s.n_bytes_tc_dropped[tc_id];
        }
 
@@ -3107,8 +3152,7 @@ read_pipe_stats(struct rte_eth_dev *dev,
                        i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
                        i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
 
-               int status = rte_sched_queue_read_stats(
-                       p->soft.tm.sched,
+               int status = rte_sched_queue_read_stats(SCHED(p),
                        qid,
                        &s,
                        &qlen);
@@ -3118,8 +3162,8 @@ read_pipe_stats(struct rte_eth_dev *dev,
                /* Stats accumulate */
                np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
                np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
-               np->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
-               np->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+               np->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
+               np->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
                        s.n_bytes_dropped;
                np->stats.leaf.n_pkts_queued = qlen;
        }
@@ -3168,8 +3212,7 @@ read_tc_stats(struct rte_eth_dev *dev,
                        tc_id,
                        i);
 
-               int status = rte_sched_queue_read_stats(
-                       p->soft.tm.sched,
+               int status = rte_sched_queue_read_stats(SCHED(p),
                        qid,
                        &s,
                        &qlen);
@@ -3179,8 +3222,8 @@ read_tc_stats(struct rte_eth_dev *dev,
                /* Stats accumulate */
                nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
                nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
-               nt->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
-               nt->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+               nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
+               nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
                        s.n_bytes_dropped;
                nt->stats.leaf.n_pkts_queued = qlen;
        }
@@ -3228,8 +3271,7 @@ read_queue_stats(struct rte_eth_dev *dev,
                tc_id,
                queue_id);
 
-       int status = rte_sched_queue_read_stats(
-               p->soft.tm.sched,
+       int status = rte_sched_queue_read_stats(SCHED(p),
                qid,
                &s,
                &qlen);
@@ -3239,8 +3281,8 @@ read_queue_stats(struct rte_eth_dev *dev,
        /* Stats accumulate */
        nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
        nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
-       nq->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
-       nq->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+       nq->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped;
+       nq->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] +=
                s.n_bytes_dropped;
        nq->stats.leaf.n_pkts_queued = qlen;