--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
#define EVENTDEV_NAME_DSW_PMD event_dsw
+static int
+dsw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+	       const struct rte_event_port_conf *conf)
+{
+	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
+	struct dsw_port *port;
+	struct rte_event_ring *in_ring;
+	char ring_name[RTE_RING_NAMESIZE];
+
+	port = &dsw->ports[port_id];
+
+	*port = (struct dsw_port) {
+		.id = port_id,
+		.dsw = dsw,
+		.dequeue_depth = conf->dequeue_depth,
+		.enqueue_depth = conf->enqueue_depth,
+		.new_event_threshold = conf->new_event_threshold
+	};
+
+	snprintf(ring_name, sizeof(ring_name), "dsw%d_p%u", dev->data->dev_id,
+		 port_id);
+
+	in_ring = rte_event_ring_create(ring_name, DSW_IN_RING_SIZE,
+					dev->data->socket_id,
+					RING_F_SC_DEQ|RING_F_EXACT_SZ);
+
+	if (in_ring == NULL)
+		return -ENOMEM;
+
+	port->in_ring = in_ring;
+
+	dev->data->ports[port_id] = port;
+
+	return 0;
+}
+
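A note on the ring flags above, going by the rte_ring semantics that rte_event_ring inherits: RING_F_EXACT_SZ gives the ring a usable capacity of exactly DSW_IN_RING_SIZE (instead of requiring a power-of-two count with one slot unusable), and RING_F_SC_DEQ declares single-consumer dequeue, presumably safe because only the owning port drains its own input ring, while the default multi-producer enqueue mode lets any port feed it.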
+static void
+dsw_port_def_conf(struct rte_eventdev *dev __rte_unused,
+		  uint8_t port_id __rte_unused,
+		  struct rte_event_port_conf *port_conf)
+{
+	*port_conf = (struct rte_event_port_conf) {
+		.new_event_threshold = 1024,
+		.dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH / 4,
+		.enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH / 4
+	};
+}
+
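For context, a minimal application-side sketch (the helper name is hypothetical; the API calls are the standard eventdev ones) showing how the two callbacks above are reached: rte_event_port_default_conf_get() lands in dsw_port_def_conf(), and rte_event_port_setup() in dsw_port_setup().

#include <rte_eventdev.h>

/* Hypothetical example: set up one port on an already-configured
 * dsw event device, starting from the PMD's default port config.
 */
static int
example_setup_port(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event_port_conf conf;
	int rc;

	rc = rte_event_port_default_conf_get(dev_id, port_id, &conf);
	if (rc < 0)
		return rc;

	return rte_event_port_setup(dev_id, port_id, &conf);
}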
+static void
+dsw_port_release(void *p)
+{
+	struct dsw_port *port = p;
+
+	rte_event_ring_free(port->in_ring);
+}
+
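The ring created in dsw_port_setup() is only set up here; the enqueue/dequeue paths that actually use it come in later patches. Still, the choice of RING_F_SC_DEQ anticipates a consumer like the following sketch (the function is hypothetical; rte_event_ring_dequeue_burst() is the real burst API):

#include <rte_event_ring.h>

/* Hypothetical sketch: the owning port, on a single lcore, drains
 * its own in_ring; NULL means the remaining-event count is unused.
 */
static uint16_t
example_drain_port(struct dsw_port *port, struct rte_event *events,
		   uint16_t num)
{
	return rte_event_ring_dequeue_burst(port->in_ring, events, num,
					    NULL);
}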
static int
dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
		const struct rte_event_queue_conf *conf)

static int
dsw_configure(struct rte_eventdev *dev)
{
	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
	const struct rte_event_dev_config *conf = &dev->data->dev_conf;

+	dsw->num_ports = conf->nb_event_ports;
	dsw->num_queues = conf->nb_event_queues;

	return 0;
}
static struct rte_eventdev_ops dsw_evdev_ops = {
+	.port_setup = dsw_port_setup,
+	.port_def_conf = dsw_port_def_conf,
+	.port_release = dsw_port_release,
	.queue_setup = dsw_queue_setup,
	.queue_def_conf = dsw_queue_def_conf,
	.queue_release = dsw_queue_release,
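With port_setup, port_def_conf and port_release now wired into the ops struct, the standard eventdev bring-up sequence can exercise this PMD; below is a sketch under the assumption that the rest of the series supplies the remaining ops (e.g. dev_start), with hypothetical names and minimal error handling:

#include <rte_eventdev.h>

/* Hypothetical bring-up of a single-queue, single-port dsw device. */
static int
example_bring_up(uint8_t dev_id)
{
	struct rte_event_dev_config dev_conf = {
		.nb_events_limit = 4096,
		.nb_event_queues = 1,
		.nb_event_ports = 1,
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 32,
		.nb_event_port_enqueue_depth = 32,
	};

	if (rte_event_dev_configure(dev_id, &dev_conf) < 0)
		return -1;
	/* NULL selects the PMD defaults, i.e. dsw_queue_def_conf()
	 * and dsw_port_def_conf() respectively.
	 */
	if (rte_event_queue_setup(dev_id, 0, NULL) < 0)
		return -1;
	if (rte_event_port_setup(dev_id, 0, NULL) < 0)
		return -1;

	return rte_event_dev_start(dev_id);
}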

--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
#ifndef _DSW_EVDEV_H_
#define _DSW_EVDEV_H_
+#include <rte_event_ring.h>
#include <rte_eventdev.h>
#define DSW_PMD_NAME RTE_STR(event_dsw)
#define DSW_MAX_FLOWS (1<<(DSW_MAX_FLOWS_BITS))
#define DSW_MAX_FLOWS_MASK (DSW_MAX_FLOWS-1)
+/* The rings are dimensioned so that all in-flight events can reside
+ * on any one of the port rings, to avoid the trouble of having to
+ * care about the case where there's no room on the destination port's
+ * input ring.
+ */
+#define DSW_IN_RING_SIZE (DSW_MAX_EVENTS)
+
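Since DSW_IN_RING_SIZE is defined as DSW_MAX_EVENTS, the invariant the comment describes could be pinned down with a compile-time check; this is not part of the patch, just an illustrative sketch (the wrapper function is hypothetical, since RTE_BUILD_BUG_ON must appear inside a function):

#include <rte_common.h>

static inline void
example_check_ring_size(void)
{
	/* Every in-flight event fits on one port's input ring, so an
	 * enqueue onto a destination port's ring cannot fail for lack
	 * of space.
	 */
	RTE_BUILD_BUG_ON(DSW_IN_RING_SIZE < DSW_MAX_EVENTS);
}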
+struct dsw_port {
+	uint16_t id;
+
+	/* Keeping a pointer here to avoid container_of() calls, which
+	 * are expensive since they are very frequent and will result
+	 * in an integer multiplication (since the port id is an index
+	 * into the dsw_evdev port array).
+	 */
+	struct dsw_evdev *dsw;
+
+	uint16_t dequeue_depth;
+	uint16_t enqueue_depth;
+
+	int32_t new_event_threshold;
+
+	struct rte_event_ring *in_ring __rte_cache_aligned;
+} __rte_cache_aligned;
+
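To make the comment's cost argument concrete, here is the lookup that the cached dsw pointer avoids, as a sketch rather than driver code; stepping from a port back to its dsw_evdev requires arithmetic on the port's index, i.e. a multiplication by sizeof(struct dsw_port) on every call:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical alternative: port - port->id steps back to
 * &dsw->ports[0] (the id * sizeof(struct dsw_port) multiply), and
 * subtracting the offset of the ports array yields the dsw_evdev.
 */
static inline struct dsw_evdev *
example_port_to_dsw(struct dsw_port *port)
{
	return (struct dsw_evdev *)
		((uintptr_t)(port - port->id) -
		 offsetof(struct dsw_evdev, ports));
}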
struct dsw_queue {
	uint8_t schedule_type;
	uint16_t num_serving_ports;

struct dsw_evdev {
	struct rte_eventdev_data *data;

+	struct dsw_port ports[DSW_MAX_PORTS];
+	uint16_t num_ports;
	struct dsw_queue queues[DSW_MAX_QUEUES];
	uint8_t num_queues;
};
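For completeness, dsw_pmd_priv(), used by the callbacks above, is presumably the usual thin accessor over the eventdev's private data area; a sketch consistent with how dsw_port_setup() and dsw_configure() use it:

static inline struct dsw_evdev *
dsw_pmd_priv(const struct rte_eventdev *eventdev)
{
	return eventdev->data->dev_private;
}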