vdpa/mlx5: add task ring for multi-thread management
diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c
index 5db6e85..1dc4f96 100644
@@ -6,26 +6,15 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <stdarg.h>
 #include <errno.h>
 #include <stdint.h>
 #include <inttypes.h>
-#include <sys/types.h>
-#include <sys/queue.h>
 
 #include <rte_string_fns.h>
-#include <rte_byteorder.h>
 #include <rte_log.h>
-#include <rte_debug.h>
 #include <rte_dev.h>
-#include <rte_memory.h>
-#include <rte_memcpy.h>
 #include <rte_memzone.h>
 #include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_atomic.h>
-#include <rte_branch_prediction.h>
 #include <rte_common.h>
 #include <rte_malloc.h>
 #include <rte_errno.h>
@@ -36,7 +25,7 @@
 
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 
 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
 
@@ -46,6 +35,9 @@ static struct rte_eventdev_global eventdev_globals = {
        .nb_devs                = 0
 };
 
+/* Public fastpath APIs. */
+struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
+
 /* Event dev north bound API implementation */
 
 uint8_t
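
Usage sketch (not part of the patch): the new rte_event_fp_ops[] table gives the inline fast-path wrappers a flat, per-device set of function pointers indexed by dev_id, so they no longer need to reach through rte_eventdevs[]. The sketch below assumes the table exposes a per-port `data` array and an `enqueue_burst` callback; treat the field names as assumptions, they are defined in the eventdev headers, not in this hunk.

#include <rte_eventdev.h>

/* Conceptual sketch of the fast-path lookup; not the library's actual
 * inline implementation. */
static inline uint16_t
sketch_enqueue_burst(uint8_t dev_id, uint8_t port_id,
                     const struct rte_event ev[], uint16_t nb_events)
{
        const struct rte_event_fp_ops *fp = &rte_event_fp_ops[dev_id];

        /* fp->data[port_id] is the PMD's per-port handle; the callback is
         * either the PMD burst function or a safe dummy installed by
         * event_dev_fp_ops_reset(). */
        return fp->enqueue_burst(fp->data[port_id], ev, nb_events);
}
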
@@ -139,7 +131,7 @@ int
 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
 {
        struct rte_eventdev *dev;
-       const struct rte_event_timer_adapter_ops *ops;
+       const struct event_timer_adapter_ops *ops;
 
        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
 
@@ -173,11 +165,15 @@ rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
 
        if (caps == NULL)
                return -EINVAL;
-       *caps = 0;
+
+       if (dev->dev_ops->crypto_adapter_caps_get == NULL)
+               *caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
+       else
+               *caps = 0;
 
        return dev->dev_ops->crypto_adapter_caps_get ?
                (*dev->dev_ops->crypto_adapter_caps_get)
-               (dev, cdev, caps) : -ENOTSUP;
+               (dev, cdev, caps) : 0;
 }
 
 int
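
Usage sketch (not part of the patch): with this hunk, an eventdev PMD that does not implement the crypto_adapter_caps_get callback reports RTE_EVENT_CRYPTO_ADAPTER_SW_CAP instead of failing with -ENOTSUP. A hedged example of how an application might consume the result; the helper name is illustrative.

#include <rte_eventdev.h>

/* Illustrative helper: decide whether the crypto adapter needs its
 * software service path for the given eventdev/cryptodev pair. */
static int
uses_sw_crypto_adapter(uint8_t dev_id, uint8_t cdev_id)
{
        uint32_t caps = 0;

        if (rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &caps) != 0)
                return -1;

        /* PMDs lacking the callback now report the SW capability rather
         * than an error, so this path also covers them. */
        return !(caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD);
}
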
@@ -300,8 +296,8 @@ int
 rte_event_dev_configure(uint8_t dev_id,
                        const struct rte_event_dev_config *dev_conf)
 {
-       struct rte_eventdev *dev;
        struct rte_event_dev_info info;
+       struct rte_eventdev *dev;
        int diag;
 
        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
@@ -470,10 +466,13 @@ rte_event_dev_configure(uint8_t dev_id,
                return diag;
        }
 
+       event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
+
        /* Configure the device */
        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
+               event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
                event_dev_queue_config(dev, 0);
                event_dev_port_config(dev, 0);
        }
@@ -731,6 +730,25 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
        return 0;
 }
 
+void
+rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
+                      rte_eventdev_port_flush_t release_cb, void *args)
+{
+       struct rte_eventdev *dev;
+
+       RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
+       dev = &rte_eventdevs[dev_id];
+
+       if (!is_valid_port(dev, port_id)) {
+               RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+               return;
+       }
+
+       if (dev->dev_ops->port_quiesce)
+               (*dev->dev_ops->port_quiesce)(dev, dev->data->ports[port_id],
+                                             release_cb, args);
+}
+
 int
 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
                       uint32_t *attr_value)
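
Usage sketch (not part of the patch): rte_event_port_quiesce() lets a worker lcore that is leaving the datapath hand back any events still held in its port's internal buffers via a flush callback. The callback prototype below is assumed to follow rte_eventdev_port_flush_t as declared alongside this API; the function names are illustrative.

#include <rte_eventdev.h>

/* Flush callback invoked for each event still buffered in the port when
 * the worker stops polling; here we simply count the flushed events. */
static void
worker_port_flush(uint8_t dev_id, struct rte_event ev, void *arg)
{
        uint64_t *nb_flushed = arg;

        RTE_SET_USED(dev_id);
        RTE_SET_USED(ev);
        (*nb_flushed)++;
}

/* Illustrative worker teardown: quiesce the port after the last dequeue. */
static void
worker_teardown(uint8_t dev_id, uint8_t port_id)
{
        uint64_t nb_flushed = 0;

        rte_event_port_quiesce(dev_id, port_id, worker_port_flush,
                               &nb_flushed);
}
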
@@ -839,12 +857,50 @@ rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
 
                *attr_value = conf->schedule_type;
                break;
+       case RTE_EVENT_QUEUE_ATTR_WEIGHT:
+               *attr_value = RTE_EVENT_QUEUE_WEIGHT_LOWEST;
+               if (dev->dev_ops->queue_attr_get)
+                       return (*dev->dev_ops->queue_attr_get)(
+                               dev, queue_id, attr_id, attr_value);
+               break;
+       case RTE_EVENT_QUEUE_ATTR_AFFINITY:
+               *attr_value = RTE_EVENT_QUEUE_AFFINITY_LOWEST;
+               if (dev->dev_ops->queue_attr_get)
+                       return (*dev->dev_ops->queue_attr_get)(
+                               dev, queue_id, attr_id, attr_value);
+               break;
        default:
                return -EINVAL;
        };
        return 0;
 }
 
+int
+rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
+                        uint64_t attr_value)
+{
+       struct rte_eventdev *dev;
+
+       RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+       dev = &rte_eventdevs[dev_id];
+       if (!is_valid_queue(dev, queue_id)) {
+               RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
+               return -EINVAL;
+       }
+
+       if (!(dev->data->event_dev_cap &
+             RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR)) {
+               RTE_EDEV_LOG_ERR(
+                       "Device %" PRIu8 " does not support changing queue attributes at runtime",
+                       dev_id);
+               return -ENOTSUP;
+       }
+
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_attr_set, -ENOTSUP);
+       return (*dev->dev_ops->queue_attr_set)(dev, queue_id, attr_id,
+                                              attr_value);
+}
+
 int
 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
                    const uint8_t queues[], const uint8_t priorities[],
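
Usage sketch (not part of the patch): queue weight and affinity can now be changed at runtime when the PMD advertises RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR. The helper name is illustrative, and RTE_EVENT_QUEUE_WEIGHT_HIGHEST is assumed to be defined alongside the _LOWEST value used in the attr_get hunk above.

#include <errno.h>
#include <rte_eventdev.h>

/* Illustrative runtime tuning: raise a queue's scheduling weight if the
 * device supports runtime queue-attribute changes. */
static int
bump_queue_weight(uint8_t dev_id, uint8_t queue_id)
{
        struct rte_event_dev_info info;

        if (rte_event_dev_info_get(dev_id, &info) != 0)
                return -1;

        if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR))
                return -ENOTSUP;

        /* May still return -ENOTSUP if the PMD lacks queue_attr_set. */
        return rte_event_queue_attr_set(dev_id, queue_id,
                                        RTE_EVENT_QUEUE_ATTR_WEIGHT,
                                        RTE_EVENT_QUEUE_WEIGHT_HIGHEST);
}
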
@@ -1244,6 +1300,8 @@ rte_event_dev_start(uint8_t dev_id)
        else
                return diag;
 
+       event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);
+
        return 0;
 }
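
Usage sketch (not part of the patch): taken together with the configure/stop/close hunks, the fast-path table now tracks device state; it is cleared on configure, stop, close and release, and published on successful start (and at the end of probing). A hedged view of the resulting control-path ordering as seen from an application; configuration values are elided.

#include <rte_eventdev.h>

/* Illustrative control path; the fast-path function table for dev_id is
 * only populated between start and stop. */
static void
run_device(uint8_t dev_id, const struct rte_event_dev_config *conf)
{
        if (rte_event_dev_configure(dev_id, conf) != 0)   /* table reset */
                return;
        if (rte_event_dev_start(dev_id) != 0)             /* table set   */
                return;

        /* Workers call rte_event_enqueue_burst()/rte_event_dequeue_burst()
         * here; calls against a stopped or unconfigured device hit the
         * dummy ops and fail with rte_errno = ENOTSUP. */

        rte_event_dev_stop(dev_id);                       /* table reset */
        rte_event_dev_close(dev_id);                      /* table reset */
}
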
 
@@ -1284,6 +1342,7 @@ rte_event_dev_stop(uint8_t dev_id)
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
        rte_eventdev_trace_stop(dev_id);
+       event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
 }
 
 int
@@ -1302,6 +1361,7 @@ rte_event_dev_close(uint8_t dev_id)
                return -EBUSY;
        }
 
+       event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
        rte_eventdev_trace_close(dev_id);
        return (*dev->dev_ops->dev_close)(dev);
 }
@@ -1355,24 +1415,6 @@ eventdev_find_free_device_index(void)
        return RTE_EVENT_MAX_DEVS;
 }
 
-static uint16_t
-rte_event_tx_adapter_enqueue(__rte_unused void *port,
-                       __rte_unused struct rte_event ev[],
-                       __rte_unused uint16_t nb_events)
-{
-       rte_errno = ENOTSUP;
-       return 0;
-}
-
-static uint16_t
-rte_event_crypto_adapter_enqueue(__rte_unused void *port,
-                       __rte_unused struct rte_event ev[],
-                       __rte_unused uint16_t nb_events)
-{
-       rte_errno = ENOTSUP;
-       return 0;
-}
-
 struct rte_eventdev *
 rte_event_pmd_allocate(const char *name, int socket_id)
 {
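
Note (not part of the patch): the per-device dummy Tx/crypto adapter enqueue callbacks are dropped here because equivalent placeholders are now presumed to be installed for the whole fast-path table by event_dev_fp_ops_reset(). For illustration, such a placeholder has the same shape as the stubs deleted above.

#include <errno.h>
#include <rte_errno.h>
#include <rte_eventdev.h>

/* Sketch of a placeholder fast-path callback: accept nothing and report
 * ENOTSUP, mirroring the removed per-device stubs. */
static uint16_t
dummy_event_tx_adapter_enqueue(__rte_unused void *port,
                               __rte_unused struct rte_event ev[],
                               __rte_unused uint16_t nb_events)
{
        rte_errno = ENOTSUP;
        return 0;
}
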
@@ -1393,10 +1435,6 @@ rte_event_pmd_allocate(const char *name, int socket_id)
 
        eventdev = &rte_eventdevs[dev_id];
 
-       eventdev->txa_enqueue = rte_event_tx_adapter_enqueue;
-       eventdev->txa_enqueue_same_dest = rte_event_tx_adapter_enqueue;
-       eventdev->ca_enqueue = rte_event_crypto_adapter_enqueue;
-
        if (eventdev->data == NULL) {
                struct rte_eventdev_data *eventdev_data = NULL;
 
@@ -1435,6 +1473,7 @@ rte_event_pmd_release(struct rte_eventdev *eventdev)
        if (eventdev == NULL)
                return -EINVAL;
 
+       event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
        eventdev->attached = RTE_EVENTDEV_DETACHED;
        eventdev_globals.nb_devs--;
 
@@ -1460,6 +1499,15 @@ rte_event_pmd_release(struct rte_eventdev *eventdev)
        return 0;
 }
 
+void
+event_dev_probing_finish(struct rte_eventdev *eventdev)
+{
+       if (eventdev == NULL)
+               return;
+
+       event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
+                            eventdev);
+}
 
 static int
 handle_dev_list(const char *cmd __rte_unused,
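
Usage sketch (not part of the patch): event_dev_probing_finish() is intended as the last step of a PMD probe path, so the fast-path table is published only once the device is fully initialised. The driver name and setup steps below are hypothetical; rte_event_pmd_allocate() is used with the signature shown earlier in this file.

#include <errno.h>
#include <rte_lcore.h>
#include "eventdev_pmd.h"

/* Illustrative tail of an eventdev PMD probe routine. */
static int
example_event_pmd_probe(const char *name)
{
        struct rte_eventdev *dev;

        dev = rte_event_pmd_allocate(name, rte_socket_id());
        if (dev == NULL)
                return -ENODEV;

        /* Driver-specific initialisation of dev->dev_ops, queues and
         * ports would go here. */

        /* Publish the fast-path table as the very last probe step. */
        event_dev_probing_finish(dev);
        return 0;
}
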