eventdev: define default value for dequeue timeout
[dpdk.git] / lib / librte_eventdev / rte_eventdev.c
index b13eb00..e706688 100644 (file)
@@ -45,7 +45,6 @@
 #include <rte_log.h>
 #include <rte_debug.h>
 #include <rte_dev.h>
-#include <rte_pci.h>
 #include <rte_memory.h>
 #include <rte_memcpy.h>
 #include <rte_memzone.h>
@@ -125,7 +124,7 @@ rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
 
        dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
 
-       dev_info->pci_dev = dev->pci_dev;
+       dev_info->dev = dev->dev;
        return 0;
 }
 
@@ -188,6 +187,8 @@ rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
        return 0;
 }
 
+#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
+
 static inline int
 rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 {
@@ -249,6 +250,9 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
                                        "nb_ports %u", nb_ports);
                        return -(ENOMEM);
                }
+               for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
+                       dev->data->links_map[i] =
+                               EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
        } else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
 
@@ -303,6 +307,10 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 
                if (nb_ports > old_nb_ports) {
                        uint8_t new_ps = nb_ports - old_nb_ports;
+                       unsigned int old_links_map_end =
+                               old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
+                       unsigned int links_map_end =
+                               nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
 
                        memset(ports + old_nb_ports, 0,
                                sizeof(ports[0]) * new_ps);
@@ -310,9 +318,9 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
                                sizeof(ports_dequeue_depth[0]) * new_ps);
                        memset(ports_enqueue_depth + old_nb_ports, 0,
                                sizeof(ports_enqueue_depth[0]) * new_ps);
-                       memset(links_map +
-                               (old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV),
-                               0, sizeof(ports_enqueue_depth[0]) * new_ps);
+                       for (i = old_links_map_end; i < links_map_end; i++)
+                               links_map[i] =
+                                       EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
                }
 
                dev->data->ports = ports;
@@ -357,10 +365,11 @@ rte_event_dev_configure(uint8_t dev_id,
        (*dev->dev_ops->dev_infos_get)(dev, &info);
 
        /* Check dequeue_timeout_ns value is in limit */
-       if (!dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
-               if (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
+       if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
+               if (dev_conf->dequeue_timeout_ns &&
+                   (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
                        || dev_conf->dequeue_timeout_ns >
-                                info.max_dequeue_timeout_ns) {
+                                info.max_dequeue_timeout_ns)) {
                        RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
                        " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
                        dev_id, dev_conf->dequeue_timeout_ns,
@@ -418,8 +427,9 @@ rte_event_dev_configure(uint8_t dev_id,
                                        dev_id);
                return -EINVAL;
        }
-       if (dev_conf->nb_event_port_dequeue_depth >
-                        info.max_event_port_dequeue_depth) {
+       if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
+                (dev_conf->nb_event_port_dequeue_depth >
+                        info.max_event_port_dequeue_depth)) {
                RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
                dev_id, dev_conf->nb_event_port_dequeue_depth,
                info.max_event_port_dequeue_depth);
@@ -432,8 +442,9 @@ rte_event_dev_configure(uint8_t dev_id,
                                        dev_id);
                return -EINVAL;
        }
-       if (dev_conf->nb_event_port_enqueue_depth >
-                        info.max_event_port_enqueue_depth) {
+       if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
+               (dev_conf->nb_event_port_enqueue_depth >
+                        info.max_event_port_enqueue_depth)) {
                RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
                dev_id, dev_conf->nb_event_port_enqueue_depth,
                info.max_event_port_enqueue_depth);
@@ -591,7 +602,6 @@ rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
                                        -ENOTSUP);
                (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
-               def_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_DEFAULT;
                queue_conf = &def_conf;
        }
 
@@ -796,11 +806,11 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id,
        }
 
        for (i = 0; i < nb_links; i++)
-               if (queues[i] >= RTE_EVENT_MAX_QUEUES_PER_DEV)
+               if (queues[i] >= dev->data->nb_queues)
                        return -EINVAL;
 
-       diag = (*dev->dev_ops->port_link)(dev->data->ports[port_id], queues,
-                                               priorities, nb_links);
+       diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
+                                               queues, priorities, nb_links);
        if (diag < 0)
                return diag;
 
@@ -813,8 +823,6 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id,
        return diag;
 }
 
-#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
-
 int
 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
                      uint8_t queues[], uint16_t nb_unlinks)
@@ -841,11 +849,11 @@ rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
        }
 
        for (i = 0; i < nb_unlinks; i++)
-               if (queues[i] >= RTE_EVENT_MAX_QUEUES_PER_DEV)
+               if (queues[i] >= dev->data->nb_queues)
                        return -EINVAL;
 
-       diag = (*dev->dev_ops->port_unlink)(dev->data->ports[port_id], queues,
-                                       nb_unlinks);
+       diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
+                                       queues, nb_unlinks);
 
        if (diag < 0)
                return diag;
@@ -877,7 +885,7 @@ rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
        links_map = dev->data->links_map;
        /* Point links_map to this port specific area */
        links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
-       for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+       for (i = 0; i < dev->data->nb_queues; i++) {
                if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
                        queues[count] = i;
                        priorities[count] = (uint8_t)links_map[i];
@@ -900,8 +908,7 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
        if (timeout_ticks == NULL)
                return -EINVAL;
 
-       (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
-       return 0;
+       return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
 }
 
 int
@@ -918,6 +925,89 @@ rte_event_dev_dump(uint8_t dev_id, FILE *f)
 
 }
 
+/* Ask the PMD how many xstats entries exist for @mode / @queue_port_id by
+ * invoking its xstats_get_names op with a NULL output array; returns 0 when
+ * the driver does not implement the op (i.e. no xstats available).
+ * NOTE(review): dev_id is not validated here — both public callers are
+ * expected to have run RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET first; confirm
+ * no other caller is added without that check.
+ */
+static int
+xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
+               uint8_t queue_port_id)
+{
+       struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+       if (dev->dev_ops->xstats_get_names != NULL)
+               return (*dev->dev_ops->xstats_get_names)(dev, mode,
+                                                       queue_port_id,
+                                                       NULL, NULL, 0);
+       return 0;
+}
+
+/* Retrieve the names of the extended statistics for @mode / @queue_port_id.
+ * When @xstats_names is NULL or @size is too small, the required entry count
+ * is returned instead of filling the array (the "query size" idiom); a
+ * negative count from the driver is likewise passed straight back to the
+ * caller. Returns -ENODEV for an invalid dev_id and -ENOTSUP when the PMD
+ * does not implement the xstats_get_names op.
+ */
+int
+rte_event_dev_xstats_names_get(uint8_t dev_id,
+               enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+               struct rte_event_dev_xstats_name *xstats_names,
+               unsigned int *ids, unsigned int size)
+{
+       RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
+       const int cnt_expected_entries = xstats_get_count(dev_id, mode,
+                                                         queue_port_id);
+       /* Query-size path: also covers driver errors (negative count). */
+       if (xstats_names == NULL || cnt_expected_entries < 0 ||
+                       (int)size < cnt_expected_entries)
+               return cnt_expected_entries;
+
+       /* dev_id checked above */
+       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+       if (dev->dev_ops->xstats_get_names != NULL)
+               return (*dev->dev_ops->xstats_get_names)(dev, mode,
+                               queue_port_id, xstats_names, ids, size);
+
+       return -ENOTSUP;
+}
+
+/* Retrieve eventdev extended statistics: fill @values for the @n stat ids
+ * listed in @ids, delegating entirely to the PMD's xstats_get op.
+ * Returns the driver's result (by convention the number of entries filled),
+ * -ENODEV for an invalid dev_id, or -ENOTSUP when the op is missing.
+ */
+int
+rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
+               uint8_t queue_port_id, const unsigned int ids[],
+               uint64_t values[], unsigned int n)
+{
+       RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
+       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+       /* implemented by the driver */
+       if (dev->dev_ops->xstats_get != NULL)
+               return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
+                               ids, values, n);
+       return -ENOTSUP;
+}
+
+/* Look up a single xstat value by @name; on success *id is set by the
+ * driver to the stat's numeric id (pre-initialised to (unsigned)-1 so a
+ * failed lookup is detectable). Returns 0 for an invalid dev_id.
+ * NOTE(review): the -ENOTSUP return is implicitly converted to a very
+ * large uint64_t because the return type is unsigned — callers cannot
+ * distinguish it from a genuine counter value; they must test *id ==
+ * (unsigned)-1 instead. Confirm this matches the documented API contract.
+ */
+uint64_t
+rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
+               unsigned int *id)
+{
+       RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
+       const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+       unsigned int temp = -1;
+
+       if (id != NULL)
+               *id = (unsigned int)-1;
+       else
+               id = &temp; /* ensure driver never gets a NULL value */
+
+       /* implemented by driver */
+       if (dev->dev_ops->xstats_get_by_name != NULL)
+               return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
+       return -ENOTSUP;
+}
+
+/* Reset the selected extended statistics (the @nb_ids entries in @ids, or
+ * driver-defined "all" semantics) for @mode / @queue_port_id; queue_port_id
+ * is signed here, presumably so a negative value can mean "all queues/ports"
+ * — TODO confirm against the PMD contract. Returns -EINVAL for a bad dev_id
+ * and -ENOTSUP when the PMD lacks the xstats_reset op.
+ */
+int rte_event_dev_xstats_reset(uint8_t dev_id,
+               enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
+               const uint32_t ids[], uint32_t nb_ids)
+{
+       RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+       struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+       if (dev->dev_ops->xstats_reset != NULL)
+               return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
+                                                       ids, nb_ids);
+       return -ENOTSUP;
+}
+
 int
 rte_event_dev_start(uint8_t dev_id)
 {
@@ -984,3 +1074,127 @@ rte_event_dev_close(uint8_t dev_id)
 
        return (*dev->dev_ops->dev_close)(dev);
 }
+
+/* Obtain the shared rte_eventdev_data for @dev_id: the primary process
+ * reserves (and zeroes) a per-device memzone named "rte_eventdev_data_<id>"
+ * on @socket_id; secondary processes look the same zone up so both share
+ * one mapping. On success *data points at the zone and 0 is returned;
+ * -EINVAL if the name would be truncated, -ENOMEM on reserve/lookup failure.
+ */
+static inline int
+rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
+               int socket_id)
+{
+       char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
+       const struct rte_memzone *mz;
+       int n;
+
+       /* Generate memzone name */
+       n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
+       if (n >= (int)sizeof(mz_name))
+               return -EINVAL;
+
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+               mz = rte_memzone_reserve(mz_name,
+                               sizeof(struct rte_eventdev_data),
+                               socket_id, 0);
+       } else
+               mz = rte_memzone_lookup(mz_name);
+
+       if (mz == NULL)
+               return -ENOMEM;
+
+       *data = mz->addr;
+       /* Only the primary clears the zone; secondaries must see live data. */
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+               memset(*data, 0, sizeof(struct rte_eventdev_data));
+
+       return 0;
+}
+
+/* Linear scan of the global device table for the first slot still marked
+ * RTE_EVENTDEV_DETACHED; returns its index, or RTE_EVENT_MAX_DEVS (an
+ * out-of-range sentinel) when the table is full.
+ */
+static inline uint8_t
+rte_eventdev_find_free_device_index(void)
+{
+       uint8_t dev_id;
+
+       for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
+               if (rte_eventdevs[dev_id].attached ==
+                               RTE_EVENTDEV_DETACHED)
+                       return dev_id;
+       }
+       return RTE_EVENT_MAX_DEVS;
+}
+
+/* PMD-facing allocator: claim a free slot in rte_eventdevs[] for a new
+ * event device called @name, attach its shared data memzone (allocated on
+ * @socket_id), and mark the slot attached. Returns the slot, or NULL if the
+ * name is already taken, the table is full, or data allocation fails.
+ * NOTE(review): when data is already non-NULL (presumably a secondary
+ * process re-attach), the slot is returned as-is without re-marking it
+ * attached or bumping nb_devs — confirm that is the intended multiprocess
+ * behaviour.
+ */
+struct rte_eventdev *
+rte_event_pmd_allocate(const char *name, int socket_id)
+{
+       struct rte_eventdev *eventdev;
+       uint8_t dev_id;
+
+       if (rte_event_pmd_get_named_dev(name) != NULL) {
+               RTE_EDEV_LOG_ERR("Event device with name %s already "
+                               "allocated!", name);
+               return NULL;
+       }
+
+       dev_id = rte_eventdev_find_free_device_index();
+       if (dev_id == RTE_EVENT_MAX_DEVS) {
+               RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
+               return NULL;
+       }
+
+       eventdev = &rte_eventdevs[dev_id];
+
+       if (eventdev->data == NULL) {
+               struct rte_eventdev_data *eventdev_data = NULL;
+
+               int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
+                               socket_id);
+
+               if (retval < 0 || eventdev_data == NULL)
+                       return NULL;
+
+               eventdev->data = eventdev_data;
+
+               snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
+                               "%s", name);
+
+               eventdev->data->dev_id = dev_id;
+               eventdev->data->socket_id = socket_id;
+               eventdev->data->dev_started = 0;
+
+               eventdev->attached = RTE_EVENTDEV_ATTACHED;
+
+               eventdev_globals.nb_devs++;
+       }
+
+       return eventdev;
+}
+
+/* PMD-facing teardown: detach @eventdev from the global table and, in the
+ * primary process only, free its private data and the shared data memzone
+ * (looked up by the same "rte_eventdev_data_<id>" name used at allocation).
+ * Returns 0 on success, -EINVAL on NULL device or truncated zone name,
+ * -ENOMEM if the zone cannot be found, or the rte_memzone_free() error.
+ * NOTE(review): attached is cleared and nb_devs decremented *before* the
+ * fallible steps — an error return leaves the slot detached and counted
+ * out while data/memzone are still live. Confirm callers treat failure as
+ * fatal, otherwise this leaks.
+ */
+int
+rte_event_pmd_release(struct rte_eventdev *eventdev)
+{
+       int ret;
+       char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
+       const struct rte_memzone *mz;
+
+       if (eventdev == NULL)
+               return -EINVAL;
+
+       eventdev->attached = RTE_EVENTDEV_DETACHED;
+       eventdev_globals.nb_devs--;
+
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+               rte_free(eventdev->data->dev_private);
+
+               /* Generate memzone name */
+               ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
+                               eventdev->data->dev_id);
+               if (ret >= (int)sizeof(mz_name))
+                       return -EINVAL;
+
+               mz = rte_memzone_lookup(mz_name);
+               if (mz == NULL)
+                       return -ENOMEM;
+
+               ret = rte_memzone_free(mz);
+               if (ret)
+                       return ret;
+       }
+
+       /* Drop the (possibly shared) data pointer in this process's view. */
+       eventdev->data = NULL;
+       return 0;
+}