event/sw: add scheduling logic
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 574696b..ea08b2c 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
 #include <rte_memzone.h>
 #include <rte_kvargs.h>
 #include <rte_ring.h>
+#include <rte_errno.h>
 
 #include "sw_evdev.h"
 #include "iq_ring.h"
+#include "event_ring.h"
 
 #define EVENTDEV_NAME_SW_PMD event_sw
 #define NUMA_NODE_ARG "numa_node"
 #define SCHED_QUANTA_ARG "sched_quanta"
 #define CREDIT_QUANTA_ARG "credit_quanta"
 
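+/* forward declaration: sw_info_get() is defined later in this file but is
+ * needed by sw_port_setup() below
+ */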
+static void
+sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
+
+static int
+sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
+               const uint8_t priorities[], uint16_t num)
+{
+       struct sw_port *p = port;
+       struct sw_evdev *sw = sw_pmd_priv(dev);
+       int i;
+
+       RTE_SET_USED(priorities);
+       for (i = 0; i < num; i++) {
+               struct sw_qid *q = &sw->qids[queues[i]];
+
+               /* check for qid map overflow */
+               if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) {
+                       rte_errno = EDQUOT;
+                       break;
+               }
+
+               if (p->is_directed && p->num_qids_mapped > 0) {
+                       rte_errno = EDQUOT;
+                       break;
+               }
+
+               if (q->type == SW_SCHED_TYPE_DIRECT) {
+                       /* check directed qids only map to one port */
+                       if (p->num_qids_mapped > 0) {
+                               rte_errno = EDQUOT;
+                               break;
+                       }
+                       /* check port only takes a directed flow */
+                       if (num > 1) {
+                               rte_errno = EDQUOT;
+                               break;
+                       }
+
+                       p->is_directed = 1;
+                       p->num_qids_mapped = 1;
+               } else if (q->type == RTE_SCHED_TYPE_ORDERED) {
+                       p->num_ordered_qids++;
+                       p->num_qids_mapped++;
+               } else if (q->type == RTE_SCHED_TYPE_ATOMIC) {
+                       p->num_qids_mapped++;
+               }
+
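+               /* publish the new mapping: write the CQ map entry, then
+                * issue a write barrier before bumping the count so the
+                * scheduler never observes an uninitialized slot
+                */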
+               q->cq_map[q->cq_num_mapped_cqs] = p->id;
+               rte_smp_wmb();
+               q->cq_num_mapped_cqs++;
+       }
+       return i;
+}
+
+static int
+sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
+               uint16_t nb_unlinks)
+{
+       struct sw_port *p = port;
+       struct sw_evdev *sw = sw_pmd_priv(dev);
+       unsigned int i, j;
+
+       int unlinked = 0;
+       for (i = 0; i < nb_unlinks; i++) {
+               struct sw_qid *q = &sw->qids[queues[i]];
+               for (j = 0; j < q->cq_num_mapped_cqs; j++) {
+                       if (q->cq_map[j] == p->id) {
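+                               /* remove this CQ by overwriting it with the
+                                * last mapped CQ and shrinking the map
+                                */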
+                               q->cq_map[j] =
+                                       q->cq_map[q->cq_num_mapped_cqs - 1];
+                               rte_smp_wmb();
+                               q->cq_num_mapped_cqs--;
+                               unlinked++;
+
+                               p->num_qids_mapped--;
+
+                               if (q->type == RTE_SCHED_TYPE_ORDERED)
+                                       p->num_ordered_qids--;
+
+                               continue;
+                       }
+               }
+       }
+       return unlinked;
+}
+
+static int
+sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+               const struct rte_event_port_conf *conf)
+{
+       struct sw_evdev *sw = sw_pmd_priv(dev);
+       struct sw_port *p = &sw->ports[port_id];
+       char buf[QE_RING_NAMESIZE];
+       unsigned int i;
+
+       struct rte_event_dev_info info;
+       sw_info_get(dev, &info);
+
+       /* detect re-configuring and return credits to instance if needed */
+       if (p->initialized) {
+               /* taking credits from the pool is done one quanta at a time,
+                * and credits may be spent (counted in p->inflights) or still
+                * available in the port (p->inflight_credits). We must return
+                * the sum so as not to leak credits.
+                */
+               int possible_inflights = p->inflight_credits + p->inflights;
+               rte_atomic32_sub(&sw->inflights, possible_inflights);
+       }
+
+       *p = (struct sw_port){0}; /* zero entire structure */
+       p->id = port_id;
+       p->sw = sw;
+
+       snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
+                       "rx_worker_ring");
+       p->rx_worker_ring = qe_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
+                       dev->data->socket_id);
+       if (p->rx_worker_ring == NULL) {
+               SW_LOG_ERR("Error creating RX worker ring for port %d\n",
+                               port_id);
+               return -1;
+       }
+
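+       /* new_event_threshold caps the number of new events this port may
+        * have in flight at any time
+        */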
+       p->inflight_max = conf->new_event_threshold;
+
+       snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
+                       "cq_worker_ring");
+       p->cq_worker_ring = qe_ring_create(buf, conf->dequeue_depth,
+                       dev->data->socket_id);
+       if (p->cq_worker_ring == NULL) {
+               qe_ring_destroy(p->rx_worker_ring);
+               SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
+                               port_id);
+               return -1;
+       }
+       sw->cq_ring_space[port_id] = conf->dequeue_depth;
+
+       /* set hist list contents to empty */
+       for (i = 0; i < SW_PORT_HIST_LIST; i++) {
+               p->hist_list[i].fid = -1;
+               p->hist_list[i].qid = -1;
+       }
+       dev->data->ports[port_id] = p;
+
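+       /* make the port state above visible before flagging it initialized */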
+       rte_smp_wmb();
+       p->initialized = 1;
+       return 0;
+}
+
+static void
+sw_port_release(void *port)
+{
+       struct sw_port *p = (void *)port;
+       if (p == NULL)
+               return;
+
+       qe_ring_destroy(p->rx_worker_ring);
+       qe_ring_destroy(p->cq_worker_ring);
+       memset(p, 0, sizeof(*p));
+}
+
 static int32_t
 qid_init(struct sw_evdev *sw, unsigned int idx, int type,
                const struct rte_event_queue_conf *queue_conf)
@@ -249,6 +411,7 @@ sw_dev_configure(const struct rte_eventdev *dev)
        sw->qid_count = conf->nb_event_queues;
        sw->port_count = conf->nb_event_ports;
        sw->nb_events_limit = conf->nb_events_limit;
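+       /* reset the device-wide count of in-flight events on configure */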
+       rte_atomic32_set(&sw->inflights, 0);
 
        if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
                return -ENOTSUP;
@@ -319,6 +482,10 @@ sw_probe(const char *name, const char *params)
                        .queue_setup = sw_queue_setup,
                        .queue_release = sw_queue_release,
                        .port_def_conf = sw_port_def_conf,
+                       .port_setup = sw_port_setup,
+                       .port_release = sw_port_release,
+                       .port_link = sw_port_link,
+                       .port_unlink = sw_port_unlink,
        };
 
        static const char *const args[] = {
@@ -386,6 +553,14 @@ sw_probe(const char *name, const char *params)
                return -EFAULT;
        }
        dev->dev_ops = &evdev_sw_ops;
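+       /* fast-path function pointers, called directly by the application */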
+       dev->enqueue = sw_event_enqueue;
+       dev->enqueue_burst = sw_event_enqueue_burst;
+       dev->dequeue = sw_event_dequeue;
+       dev->dequeue_burst = sw_event_dequeue_burst;
+       dev->schedule = sw_event_schedule;
+
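+       /* a secondary process only needs the ops and fast-path pointers set
+        * above; the remaining initialization is done by the primary process
+        */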
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return 0;
 
        sw = dev->data->dev_private;
        sw->data = dev->data;