diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index ba9b814..5443ef5 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -29,7 +29,7 @@
 #include <rte_event_eth_rx_adapter.h>
 #include <rte_dpaa_bus.h>
 #include <rte_dpaa_logs.h>
-#include <rte_cycles_64.h>
+#include <rte_cycles.h>
 
 #include <dpaa_ethdev.h>
 #include "dpaa_eventdev.h"
@@ -59,6 +59,118 @@ dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
        return 0;
 }
 
+static void
+dpaa_eventq_portal_add(u16 ch_id)
+{
+       uint32_t sdqcr;
+
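+       /* Map the pool channel to its SDQCR bit and enable static dequeue
+        * from that channel on this portal.
+        */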
+       sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(ch_id);
+       qman_static_dequeue_add(sdqcr, NULL);
+}
+
+static uint16_t
+dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
+                        uint16_t nb_events)
+{
+       uint16_t i;
+       struct rte_mbuf *mbuf;
+
+       RTE_SET_USED(port);
+       /* Release all the contexts saved previously */
+       for (i = 0; i < nb_events; i++) {
+               switch (ev[i].op) {
+               case RTE_EVENT_OP_RELEASE:
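+                       /* DCA-consume the held DQRR entry and clear the
+                        * per-lcore atomic-context bookkeeping.
+                        */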
+                       qman_dca_index(ev[i].impl_opaque, 0);
+                       mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
+                       mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+                       DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
+                       DPAA_PER_LCORE_DQRR_SIZE--;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return nb_events;
+}
+
+static uint16_t
+dpaa_event_enqueue(void *port, const struct rte_event *ev)
+{
+       return dpaa_event_enqueue_burst(port, ev, 1);
+}
+
+static uint16_t
+dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
+                        uint16_t nb_events, uint64_t timeout_ticks)
+{
+       int ret;
+       u16 ch_id;
+       void *buffers[8];
+       u32 num_frames, i;
+       uint64_t wait_time, cur_ticks, start_ticks;
+       struct dpaa_port *portal = (struct dpaa_port *)port;
+       struct rte_mbuf *mbuf;
+
+       if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+               /* Affine current thread context to a qman portal */
+               ret = rte_dpaa_portal_init((void *)0);
+               if (ret) {
+                       DPAA_EVENTDEV_ERR("Unable to initialize portal");
+                       return ret;
+               }
+       }
+
+       if (unlikely(!portal->is_port_linked)) {
+               /*
+                * Affine event queue for current thread context
+                * to a qman portal.
+                */
+               for (i = 0; i < portal->num_linked_evq; i++) {
+                       ch_id = portal->evq_info[i].ch_id;
+                       dpaa_eventq_portal_add(ch_id);
+               }
+               portal->is_port_linked = true;
+       }
+
+       /* Check if there are atomic contexts to be released */
+       i = 0;
+       while (DPAA_PER_LCORE_DQRR_SIZE) {
+               if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
+                       qman_dca_index(i, 0);
+                       mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
+                       mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+                       DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
+                       DPAA_PER_LCORE_DQRR_SIZE--;
+               }
+               i++;
+       }
+       DPAA_PER_LCORE_DQRR_HELD = 0;
+
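+       /* Use the per-port timeout if one is configured; otherwise honour
+        * the caller's timeout_ticks.
+        */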
+       if (portal->timeout == DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID)
+               wait_time = timeout_ticks;
+       else
+               wait_time = portal->timeout;
+
+       /* Let's dequeue the frames */
+       start_ticks = rte_get_timer_cycles();
+       wait_time += start_ticks;
+       do {
+               num_frames = qman_portal_dequeue(ev, nb_events, buffers);
+               if (num_frames != 0)
+                       break;
+               cur_ticks = rte_get_timer_cycles();
+       } while (cur_ticks < wait_time);
+
+       return num_frames;
+}
+
+static uint16_t
+dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)
+{
+       return dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks);
+}
+
 static void
 dpaa_event_dev_info_get(struct rte_eventdev *dev,
                        struct rte_event_dev_info *dev_info)
@@ -201,15 +313,284 @@ dpaa_event_dev_close(struct rte_eventdev *dev)
        return 0;
 }
 
+static void
+dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+                         struct rte_event_queue_conf *queue_conf)
+{
+       EVENTDEV_DRV_FUNC_TRACE();
+
+       RTE_SET_USED(dev);
+       RTE_SET_USED(queue_id);
+
+       memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
+       queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
+       queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+}
+
+static int
+dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+                      const struct rte_event_queue_conf *queue_conf)
+{
+       struct dpaa_eventdev *priv = dev->data->dev_private;
+       struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];
+
+       EVENTDEV_DRV_FUNC_TRACE();
+
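+       /* Only atomic and parallel scheduling are supported; ordered
+        * queues are rejected.
+        */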
+       switch (queue_conf->schedule_type) {
+       case RTE_SCHED_TYPE_PARALLEL:
+       case RTE_SCHED_TYPE_ATOMIC:
+               break;
+       case RTE_SCHED_TYPE_ORDERED:
+               EVENTDEV_DRV_ERR("Schedule type is not supported.");
+               return -1;
+       }
+       evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
+       evq_info->event_queue_id = queue_id;
+
+       return 0;
+}
+
+static void
+dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+       EVENTDEV_DRV_FUNC_TRACE();
+
+       RTE_SET_USED(dev);
+       RTE_SET_USED(queue_id);
+}
+
+static void
+dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
+                                struct rte_event_port_conf *port_conf)
+{
+       EVENTDEV_DRV_FUNC_TRACE();
+
+       RTE_SET_USED(dev);
+       RTE_SET_USED(port_id);
+
+       port_conf->new_event_threshold = DPAA_EVENT_MAX_NUM_EVENTS;
+       port_conf->dequeue_depth = DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+       port_conf->enqueue_depth = DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+}
+
+static int
+dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+                     const struct rte_event_port_conf *port_conf)
+{
+       struct dpaa_eventdev *eventdev = dev->data->dev_private;
+
+       EVENTDEV_DRV_FUNC_TRACE();
+
+       RTE_SET_USED(port_conf);
+       dev->data->ports[port_id] = &eventdev->ports[port_id];
+
+       return 0;
+}
+
+static void
+dpaa_event_port_release(void *port)
+{
+       EVENTDEV_DRV_FUNC_TRACE();
+
+       RTE_SET_USED(port);
+}
+
+static int
+dpaa_event_port_link(struct rte_eventdev *dev, void *port,
+                    const uint8_t queues[], const uint8_t priorities[],
+                    uint16_t nb_links)
+{
+       struct dpaa_eventdev *priv = dev->data->dev_private;
+       struct dpaa_port *event_port = (struct dpaa_port *)port;
+       struct dpaa_eventq *event_queue;
+       uint8_t eventq_id;
+       int i;
+
+       RTE_SET_USED(dev);
+       RTE_SET_USED(priorities);
+
+       /* First, check that the input configuration is valid */
+       for (i = 0; i < nb_links; i++) {
+               eventq_id = queues[i];
+               event_queue = &priv->evq_info[eventq_id];
+               if ((event_queue->event_queue_cfg
+                       & RTE_EVENT_QUEUE_CFG_SINGLE_LINK)
+                       && (event_queue->event_port)) {
+                       return -EINVAL;
+               }
+       }
+
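+       /* Record each queue's channel on the port; the channels are added
+        * to the portal on first dequeue.
+        */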
+       for (i = 0; i < nb_links; i++) {
+               eventq_id = queues[i];
+               event_queue = &priv->evq_info[eventq_id];
+               event_port->evq_info[i].event_queue_id = eventq_id;
+               event_port->evq_info[i].ch_id = event_queue->ch_id;
+               event_queue->event_port = port;
+       }
+
+       event_port->num_linked_evq = event_port->num_linked_evq + i;
+
+       return (int)i;
+}
+
+static int
+dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
+                      uint8_t queues[], uint16_t nb_links)
+{
+       int i;
+       uint8_t eventq_id;
+       struct dpaa_eventq *event_queue;
+       struct dpaa_eventdev *priv = dev->data->dev_private;
+       struct dpaa_port *event_port = (struct dpaa_port *)port;
+
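+       /* Nothing is linked on this port; report all requested unlinks
+        * as done.
+        */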
+       if (!event_port->num_linked_evq)
+               return nb_links;
+
+       for (i = 0; i < nb_links; i++) {
+               eventq_id = queues[i];
+               event_port->evq_info[eventq_id].event_queue_id = -1;
+               event_port->evq_info[eventq_id].ch_id = 0;
+               event_queue = &priv->evq_info[eventq_id];
+               event_queue->event_port = NULL;
+       }
+
+       event_port->num_linked_evq = event_port->num_linked_evq - i;
+
+       return (int)i;
+}
+
+static int
+dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
+                                  const struct rte_eth_dev *eth_dev,
+                                  uint32_t *caps)
+{
+       const char *ethdev_driver = eth_dev->device->driver->name;
+
+       EVENTDEV_DRV_FUNC_TRACE();
+
+       RTE_SET_USED(dev);
+
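+       /* Native DPAA ports deliver packets to the eventdev directly;
+        * other PMDs fall back to the SW Rx adapter.
+        */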
+       if (!strcmp(ethdev_driver, "net_dpaa"))
+               *caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP;
+       else
+               *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+
+       return 0;
+}
+
+static int
+dpaa_event_eth_rx_adapter_queue_add(
+               const struct rte_eventdev *dev,
+               const struct rte_eth_dev *eth_dev,
+               int32_t rx_queue_id,
+               const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+       struct dpaa_eventdev *eventdev = dev->data->dev_private;
+       uint8_t ev_qid = queue_conf->ev.queue_id;
+       u16 ch_id = eventdev->evq_info[ev_qid].ch_id;
+       struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
+       int ret, i;
+
+       EVENTDEV_DRV_FUNC_TRACE();
+
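+       /* An rx_queue_id of -1 means attach all Rx queues of this device. */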
+       if (rx_queue_id == -1) {
+               for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
+                       ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
+                                                    queue_conf);
+                       if (ret) {
+                               EVENTDEV_DRV_ERR(
+                                       "Event Queue attach failed:%d\n", ret);
+                               goto detach_configured_queues;
+                       }
+               }
+               return 0;
+       }
+
+       ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
+       if (ret)
+               EVENTDEV_DRV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);
+       return ret;
+
+detach_configured_queues:
+
+       for (i = (i - 1); i >= 0; i--)
+               dpaa_eth_eventq_detach(eth_dev, i);
+
+       return ret;
+}
+
+static int
+dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
+                                   const struct rte_eth_dev *eth_dev,
+                                   int32_t rx_queue_id)
+{
+       int ret, i;
+       struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
+
+       EVENTDEV_DRV_FUNC_TRACE();
+
+       RTE_SET_USED(dev);
+       if (rx_queue_id == -1) {
+               for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
+                       ret = dpaa_eth_eventq_detach(eth_dev, i);
+                       if (ret)
+                               EVENTDEV_DRV_ERR(
+                                       "Event Queue detach failed:%d\n", ret);
+               }
+
+               return 0;
+       }
+
+       ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
+       if (ret)
+               EVENTDEV_DRV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);
+       return ret;
+}
+
+static int
+dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
+                               const struct rte_eth_dev *eth_dev)
+{
+       EVENTDEV_DRV_FUNC_TRACE();
+
+       RTE_SET_USED(dev);
+       RTE_SET_USED(eth_dev);
+
+       return 0;
+}
+
+static int
+dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
+                              const struct rte_eth_dev *eth_dev)
+{
+       EVENTDEV_DRV_FUNC_TRACE();
+
+       RTE_SET_USED(dev);
+       RTE_SET_USED(eth_dev);
+
+       return 0;
+}
 
-static const struct rte_eventdev_ops dpaa_eventdev_ops = {
+static struct rte_eventdev_ops dpaa_eventdev_ops = {
        .dev_infos_get    = dpaa_event_dev_info_get,
        .dev_configure    = dpaa_event_dev_configure,
        .dev_start        = dpaa_event_dev_start,
        .dev_stop         = dpaa_event_dev_stop,
        .dev_close        = dpaa_event_dev_close,
+       .queue_def_conf   = dpaa_event_queue_def_conf,
+       .queue_setup      = dpaa_event_queue_setup,
+       .queue_release    = dpaa_event_queue_release,
+       .port_def_conf    = dpaa_event_port_default_conf_get,
+       .port_setup       = dpaa_event_port_setup,
+       .port_release     = dpaa_event_port_release,
+       .port_link        = dpaa_event_port_link,
+       .port_unlink      = dpaa_event_port_unlink,
        .timeout_ticks    = dpaa_event_dequeue_timeout_ticks,
+       .eth_rx_adapter_caps_get = dpaa_event_eth_rx_adapter_caps_get,
+       .eth_rx_adapter_queue_add = dpaa_event_eth_rx_adapter_queue_add,
+       .eth_rx_adapter_queue_del = dpaa_event_eth_rx_adapter_queue_del,
+       .eth_rx_adapter_start = dpaa_event_eth_rx_adapter_start,
+       .eth_rx_adapter_stop = dpaa_event_eth_rx_adapter_stop,
 };
 
 static int
@@ -227,6 +608,10 @@ dpaa_event_dev_create(const char *name)
        }
 
        eventdev->dev_ops       = &dpaa_eventdev_ops;
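+       /* Hook up the fast-path enqueue/dequeue handlers. */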
+       eventdev->enqueue       = dpaa_event_enqueue;
+       eventdev->enqueue_burst = dpaa_event_enqueue_burst;
+       eventdev->dequeue       = dpaa_event_dequeue;
+       eventdev->dequeue_burst = dpaa_event_dequeue_burst;
 
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)