[dpdk.git] / drivers / event / dsw / dsw_evdev.c
index bcfa17b..e796975 100644
@@ -8,6 +8,7 @@
 #include <rte_eventdev_pmd.h>
 #include <rte_eventdev_pmd_vdev.h>
 #include <rte_random.h>
+#include <rte_ring_elem.h>
 
 #include "dsw_evdev.h"
 
@@ -20,6 +21,7 @@ dsw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
        struct dsw_evdev *dsw = dsw_pmd_priv(dev);
        struct dsw_port *port;
        struct rte_event_ring *in_ring;
+       struct rte_ring *ctl_in_ring;
        char ring_name[RTE_RING_NAMESIZE];
 
        port = &dsw->ports[port_id];
@@ -42,13 +44,32 @@ dsw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
        if (in_ring == NULL)
                return -ENOMEM;
 
+       snprintf(ring_name, sizeof(ring_name), "dswctl%d_p%u",
+                dev->data->dev_id, port_id);
+
+       ctl_in_ring = rte_ring_create_elem(ring_name,
+                                          sizeof(struct dsw_ctl_msg),
+                                          DSW_CTL_IN_RING_SIZE,
+                                          dev->data->socket_id,
+                                          RING_F_SC_DEQ|RING_F_EXACT_SZ);
+
+       if (ctl_in_ring == NULL) {
+               rte_event_ring_free(in_ring);
+               return -ENOMEM;
+       }
+
        port->in_ring = in_ring;
+       port->ctl_in_ring = ctl_in_ring;
 
        rte_atomic16_init(&port->load);
+       rte_atomic32_init(&port->immigration_load);
 
        port->load_update_interval =
                (DSW_LOAD_UPDATE_INTERVAL * rte_get_timer_hz()) / US_PER_S;
 
+       port->migration_interval =
+               (DSW_MIGRATION_INTERVAL * rte_get_timer_hz()) / US_PER_S;
+
        dev->data->ports[port_id] = port;
 
        return 0;
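
Note: the control ring created above stores whole struct dsw_ctl_msg records by value rather than pointers, which is why the _elem variants of the ring API (and the new rte_ring_elem.h include) are needed. Below is a minimal sketch of how such a fixed-size element ring is used on the producer and consumer side; the message layout is illustrative only, not the driver's actual struct dsw_ctl_msg, and the element size must be a multiple of four bytes as required by the elem API.

#include <stdint.h>
#include <rte_ring_elem.h>

/* Hypothetical message layout; the real struct dsw_ctl_msg is defined in
 * dsw_evdev.h. Total size is 4 bytes, satisfying the multiple-of-4
 * element size requirement.
 */
struct ctl_msg_example {
        uint8_t type;
        uint8_t originating_port_id;
        uint16_t flow_hash;
};

static int
ctl_send(struct rte_ring *ctl_ring, struct ctl_msg_example *msg)
{
        /* Copies sizeof(*msg) bytes into the ring; returns -ENOBUFS when full. */
        return rte_ring_enqueue_elem(ctl_ring, msg, sizeof(*msg));
}

static int
ctl_receive(struct rte_ring *ctl_ring, struct ctl_msg_example *msg)
{
        /* Single-consumer dequeue, matching RING_F_SC_DEQ above; -ENOENT when empty. */
        return rte_ring_dequeue_elem(ctl_ring, msg, sizeof(*msg));
}
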
@@ -72,6 +93,7 @@ dsw_port_release(void *p)
        struct dsw_port *port = p;
 
        rte_event_ring_free(port->in_ring);
+       rte_ring_free(port->ctl_in_ring);
 }
 
 static int
@@ -84,9 +106,6 @@ dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
        if (RTE_EVENT_QUEUE_CFG_ALL_TYPES & conf->event_queue_cfg)
                return -ENOTSUP;
 
-       if (conf->schedule_type == RTE_SCHED_TYPE_ORDERED)
-               return -ENOTSUP;
-
        /* SINGLE_LINK is better off treated as TYPE_ATOMIC, since it
         * avoids the "fake" TYPE_PARALLEL flow_id assignment. Since
         * the queue will only have a single serving port, no
@@ -95,8 +114,12 @@ dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
         */
        if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg)
                queue->schedule_type = RTE_SCHED_TYPE_ATOMIC;
-       else /* atomic or parallel */
+       else {
+               if (conf->schedule_type == RTE_SCHED_TYPE_ORDERED)
+                       return -ENOTSUP;
+               /* atomic or parallel */
                queue->schedule_type = conf->schedule_type;
+       }
 
        queue->num_serving_ports = 0;
 
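
Note: with the check moved inside the else branch, ordered scheduling is still rejected for ordinary queues, while a single-link queue is now accepted regardless of the requested schedule type and served as atomic, as the comment above explains. A hedged sketch of the resulting behavior from the application side, assuming a device already configured with at least two event queues (dev_id and queue ids are illustrative):

#include <errno.h>
#include <stdio.h>
#include <rte_eventdev.h>

static int
setup_queues(uint8_t dev_id)
{
        struct rte_event_queue_conf ordered_conf = {
                .schedule_type = RTE_SCHED_TYPE_ORDERED,
                .nb_atomic_flows = 1024,
                .nb_atomic_order_sequences = 1024,
                .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
        };
        struct rte_event_queue_conf single_link_conf = {
                .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
                .schedule_type = RTE_SCHED_TYPE_ORDERED,
                .nb_atomic_flows = 1024,
                .nb_atomic_order_sequences = 1024,
                .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
        };
        int ret;

        /* Ordinary queue asking for ordered scheduling: still refused with
         * -ENOTSUP, since dsw implements no reordering logic.
         */
        ret = rte_event_queue_setup(dev_id, 0, &ordered_conf);
        if (ret == -ENOTSUP)
                printf("ordered scheduling not supported on queue 0\n");

        /* Single-link queue: accepted after this change even though ORDERED
         * is requested; having a single serving port, it is simply served as
         * RTE_SCHED_TYPE_ATOMIC.
         */
        return rte_event_queue_setup(dev_id, 1, &single_link_conf);
}
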
@@ -199,7 +222,9 @@ dsw_info_get(struct rte_eventdev *dev __rte_unused,
                .max_event_port_enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH,
                .max_num_events = DSW_MAX_EVENTS,
                .event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
-               RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED
+               RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED|
+               RTE_EVENT_DEV_CAP_NONSEQ_MODE|
+               RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT
        };
 }
 
@@ -272,6 +297,14 @@ dsw_port_drain_buf(uint8_t dev_id, struct rte_event *buf, uint16_t buf_len,
                flush(dev_id, buf[i], flush_arg);
 }
 
+static void
+dsw_port_drain_paused(uint8_t dev_id, struct dsw_port *port,
+                     eventdev_stop_flush_t flush, void *flush_arg)
+{
+       dsw_port_drain_buf(dev_id, port->paused_events, port->paused_events_len,
+                          flush, flush_arg);
+}
+
 static void
 dsw_port_drain_out(uint8_t dev_id, struct dsw_evdev *dsw, struct dsw_port *port,
                   eventdev_stop_flush_t flush, void *flush_arg)
@@ -308,6 +341,7 @@ dsw_drain(uint8_t dev_id, struct dsw_evdev *dsw,
                struct dsw_port *port = &dsw->ports[port_id];
 
                dsw_port_drain_out(dev_id, dsw, port, flush, flush_arg);
+               dsw_port_drain_paused(dev_id, port, flush, flush_arg);
                dsw_port_drain_in_ring(dev_id, port, flush, flush_arg);
        }
 }
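
Note: the new dsw_port_drain_paused() call means events parked in a port's pause buffer (e.g. while a flow is being migrated) are also handed to the application's stop-flush callback at rte_event_dev_stop() time, rather than disappearing silently. A brief, hedged sketch of how such a callback is typically registered; the mbuf-freeing body is only an example and assumes the events carry mbufs:

#include <rte_eventdev.h>
#include <rte_mbuf.h>

/* Invoked once for every event still buffered inside the device at stop time. */
static void
stop_flush_cb(uint8_t dev_id __rte_unused, struct rte_event ev, void *arg __rte_unused)
{
        rte_pktmbuf_free(ev.mbuf);
}

static void
stop_event_device(uint8_t dev_id)
{
        /* Without a registered callback, drained events are dropped without
         * the application ever seeing them.
         */
        rte_event_dev_stop_flush_callback_register(dev_id, stop_flush_cb, NULL);
        rte_event_dev_stop(dev_id);
}
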
@@ -351,7 +385,10 @@ static struct rte_eventdev_ops dsw_evdev_ops = {
        .dev_configure = dsw_configure,
        .dev_start = dsw_start,
        .dev_stop = dsw_stop,
-       .dev_close = dsw_close
+       .dev_close = dsw_close,
+       .xstats_get = dsw_xstats_get,
+       .xstats_get_names = dsw_xstats_get_names,
+       .xstats_get_by_name = dsw_xstats_get_by_name
 };
 
 static int
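
Note: the xstats_get, xstats_get_names and xstats_get_by_name callbacks registered in the ops table above make dsw's internal counters reachable through the generic eventdev xstats API. A hedged sketch of dumping the device-level counters; the fixed array size of 64 is an arbitrary choice for illustration:

#include <inttypes.h>
#include <stdio.h>
#include <rte_eventdev.h>

static void
dump_dev_xstats(uint8_t dev_id)
{
        struct rte_event_dev_xstats_name names[64];
        unsigned int ids[64];
        uint64_t values[64];
        int n, i;

        /* First fetch the id/name pairs the PMD exposes at device level... */
        n = rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
                                           0, names, ids, RTE_DIM(names));
        if (n <= 0 || n > (int)RTE_DIM(names))
                return;

        /* ...then read the current values for those ids. */
        n = rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
                                     ids, values, n);

        for (i = 0; i < n; i++)
                printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
}
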