/*
* BSD LICENSE
*
- * Copyright(c) 2016 Cavium networks. All rights reserved.
+ * Copyright(c) 2016 Cavium, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
-#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
- dev_info->pci_dev = dev->pci_dev;
+ dev_info->dev = dev->dev;
return 0;
}
return 0;
}
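+/*
+ * Sentinel stored in links_map[] entries that have no queue linked to
+ * the port; slots are reset to it at port setup and on unlink.
+ */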
+#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
+
static inline int
rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
"nb_ports %u", nb_ports);
return -(ENOMEM);
}
+ for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
+ dev->data->links_map[i] =
+ EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
sizeof(dev->data->links_map[0]) * nb_ports *
RTE_EVENT_MAX_QUEUES_PER_DEV,
RTE_CACHE_LINE_SIZE);
- if (dev->data->links_map == NULL) {
+ if (links_map == NULL) {
dev->data->nb_ports = 0;
RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
"nb_ports %u", nb_ports);
if (nb_ports > old_nb_ports) {
uint8_t new_ps = nb_ports - old_nb_ports;
+ unsigned int old_links_map_end =
+ old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
+ unsigned int links_map_end =
+ nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
memset(ports + old_nb_ports, 0,
sizeof(ports[0]) * new_ps);
sizeof(ports_dequeue_depth[0]) * new_ps);
memset(ports_enqueue_depth + old_nb_ports, 0,
sizeof(ports_enqueue_depth[0]) * new_ps);
- memset(links_map +
- (old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV),
- 0, sizeof(ports_enqueue_depth[0]) * new_ps);
+ for (i = old_links_map_end; i < links_map_end; i++)
+ links_map[i] =
+ EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
}
dev->data->ports = ports;
(*dev->dev_ops->dev_infos_get)(dev, &info);
/* Check dequeue_timeout_ns value is in limit */
- if (!dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
- if (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
+ if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
+ if (dev_conf->dequeue_timeout_ns &&
+ (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
|| dev_conf->dequeue_timeout_ns >
- info.max_dequeue_timeout_ns) {
+ info.max_dequeue_timeout_ns)) {
RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
dev_id, dev_conf->dequeue_timeout_ns,
dev_id);
return -EINVAL;
}
- if (dev_conf->nb_event_port_dequeue_depth >
- info.max_event_port_dequeue_depth) {
+ if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
+ (dev_conf->nb_event_port_dequeue_depth >
+ info.max_event_port_dequeue_depth)) {
RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
dev_id, dev_conf->nb_event_port_dequeue_depth,
info.max_event_port_dequeue_depth);
dev_id);
return -EINVAL;
}
- if (dev_conf->nb_event_port_enqueue_depth >
- info.max_event_port_enqueue_depth) {
+ if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
+ (dev_conf->nb_event_port_enqueue_depth >
+ info.max_event_port_enqueue_depth)) {
RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
dev_id, dev_conf->nb_event_port_enqueue_depth,
info.max_event_port_enqueue_depth);
static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
- if (queue_conf && (
+ if (queue_conf &&
+ !(queue_conf->event_queue_cfg &
+ RTE_EVENT_QUEUE_CFG_SINGLE_LINK) && (
((queue_conf->event_queue_cfg &
RTE_EVENT_QUEUE_CFG_TYPE_MASK)
== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
- if (queue_conf && (
+ if (queue_conf &&
+ !(queue_conf->event_queue_cfg &
+ RTE_EVENT_QUEUE_CFG_SINGLE_LINK) && (
((queue_conf->event_queue_cfg &
RTE_EVENT_QUEUE_CFG_TYPE_MASK)
== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
-ENOTSUP);
(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
- def_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_DEFAULT;
queue_conf = &def_conf;
}
return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
-uint8_t
-rte_event_queue_count(uint8_t dev_id)
-{
- struct rte_eventdev *dev;
-
- dev = &rte_eventdevs[dev_id];
- return dev->data->nb_queues;
-}
-
-uint8_t
-rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id)
-{
- struct rte_eventdev *dev;
-
- dev = &rte_eventdevs[dev_id];
- if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
- return dev->data->queues_prio[queue_id];
- else
- return RTE_EVENT_DEV_PRIORITY_NORMAL;
-}
-
static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
return 0;
}
-uint8_t
-rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id)
+int
+rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
+ uint32_t *attr_value)
{
struct rte_eventdev *dev;
+ if (!attr_value)
+ return -EINVAL;
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_eventdevs[dev_id];
- return dev->data->ports_dequeue_depth[port_id];
+
+ switch (attr_id) {
+ case RTE_EVENT_DEV_ATTR_PORT_COUNT:
+ *attr_value = dev->data->nb_ports;
+ break;
+ case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
+ *attr_value = dev->data->nb_queues;
+ break;
+ case RTE_EVENT_DEV_ATTR_STARTED:
+ *attr_value = dev->data->dev_started;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
}
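+
+/*
+ * Usage sketch (illustrative only; dev_id is assumed to be a valid,
+ * configured device), replacing the removed rte_event_port_count()-style
+ * getters:
+ *
+ *	uint32_t nb_ports;
+ *	if (rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ *			&nb_ports) != 0)
+ *		return -EINVAL;
+ */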
-uint8_t
-rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id)
+int
+rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
+ uint32_t *attr_value)
{
struct rte_eventdev *dev;
+ if (!attr_value)
+ return -EINVAL;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_eventdevs[dev_id];
- return dev->data->ports_enqueue_depth[port_id];
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return -EINVAL;
+ }
+
+ switch (attr_id) {
+ case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
+ *attr_value = dev->data->ports_enqueue_depth[port_id];
+ break;
+ case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
+ *attr_value = dev->data->ports_dequeue_depth[port_id];
+ break;
+ default:
+ return -EINVAL;
+	}
+ return 0;
}
-uint8_t
-rte_event_port_count(uint8_t dev_id)
+int
+rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
+ uint32_t *attr_value)
{
struct rte_eventdev *dev;
+ if (!attr_value)
+ return -EINVAL;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_eventdevs[dev_id];
- return dev->data->nb_ports;
+ if (!is_valid_queue(dev, queue_id)) {
+ RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
+ return -EINVAL;
+ }
+
+ switch (attr_id) {
+ case RTE_EVENT_QUEUE_ATTR_PRIORITY:
+ *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
+ *attr_value = dev->data->queues_prio[queue_id];
+ break;
+ default:
+ return -EINVAL;
+	}
+ return 0;
}
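+
+/*
+ * Usage sketch covering what the removed rte_event_queue_priority()
+ * returned (illustrative only):
+ *
+ *	uint32_t prio = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ *	rte_event_queue_attr_get(dev_id, queue_id,
+ *			RTE_EVENT_QUEUE_ATTR_PRIORITY, &prio);
+ */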
int
}
for (i = 0; i < nb_links; i++)
- if (queues[i] >= RTE_EVENT_MAX_QUEUES_PER_DEV)
+ if (queues[i] >= dev->data->nb_queues)
return -EINVAL;
- diag = (*dev->dev_ops->port_link)(dev->data->ports[port_id], queues,
- priorities, nb_links);
+ diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
+ queues, priorities, nb_links);
if (diag < 0)
return diag;
return diag;
}
-#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
-
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
uint8_t queues[], uint16_t nb_unlinks)
}
for (i = 0; i < nb_unlinks; i++)
- if (queues[i] >= RTE_EVENT_MAX_QUEUES_PER_DEV)
+ if (queues[i] >= dev->data->nb_queues)
return -EINVAL;
- diag = (*dev->dev_ops->port_unlink)(dev->data->ports[port_id], queues,
- nb_unlinks);
+ diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
+ queues, nb_unlinks);
if (diag < 0)
return diag;
links_map = dev->data->links_map;
/* Point links_map to this port specific area */
links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
- for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+ for (i = 0; i < dev->data->nb_queues; i++) {
if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
queues[count] = i;
priorities[count] = (uint8_t)links_map[i];
if (timeout_ticks == NULL)
return -EINVAL;
- (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
- return 0;
+ return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}
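+
+/*
+ * Usage sketch: convert a 100 us timeout to device ticks before passing
+ * it to rte_event_dequeue_burst() (illustrative only):
+ *
+ *	uint64_t ticks = 0;
+ *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) < 0)
+ *		ticks = 0;
+ */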
int
}
+static int
+xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id)
+{
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+ if (dev->dev_ops->xstats_get_names != NULL)
+ return (*dev->dev_ops->xstats_get_names)(dev, mode,
+ queue_port_id,
+ NULL, NULL, 0);
+ return 0;
+}
+
+int
+rte_event_dev_xstats_names_get(uint8_t dev_id,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size)
+{
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
+ const int cnt_expected_entries = xstats_get_count(dev_id, mode,
+ queue_port_id);
+ if (xstats_names == NULL || cnt_expected_entries < 0 ||
+ (int)size < cnt_expected_entries)
+ return cnt_expected_entries;
+
+ /* dev_id checked above */
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ if (dev->dev_ops->xstats_get_names != NULL)
+ return (*dev->dev_ops->xstats_get_names)(dev, mode,
+ queue_port_id, xstats_names, ids, size);
+
+ return -ENOTSUP;
+}
+
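+/*
+ * Callers use a two-pass pattern: a first call with xstats_names == NULL
+ * returns the required count. Sketch (device-scope mode, error handling
+ * elided; assumes the RTE_EVENT_DEV_XSTATS_DEVICE enumerator):
+ *
+ *	int n = rte_event_dev_xstats_names_get(dev_id,
+ *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
+ *	struct rte_event_dev_xstats_name *names =
+ *			malloc(sizeof(names[0]) * n);
+ *	unsigned int *ids = malloc(sizeof(ids[0]) * n);
+ *	rte_event_dev_xstats_names_get(dev_id,
+ *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, names, ids, n);
+ */
+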
+/* retrieve eventdev extended statistics */
+int
+rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id, const unsigned int ids[],
+ uint64_t values[], unsigned int n)
+{
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ /* implemented by the driver */
+ if (dev->dev_ops->xstats_get != NULL)
+ return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
+ ids, values, n);
+ return -ENOTSUP;
+}
+
+uint64_t
+rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
+ unsigned int *id)
+{
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	unsigned int temp = (unsigned int)-1;
+
+ if (id != NULL)
+ *id = (unsigned int)-1;
+ else
+ id = &temp; /* ensure driver never gets a NULL value */
+
+ /* implemented by driver */
+ if (dev->dev_ops->xstats_get_by_name != NULL)
+ return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
+ return -ENOTSUP;
+}
+
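+/*
+ * Usage sketch ("dev_rx" is a hypothetical counter name exposed by a
+ * driver):
+ *
+ *	unsigned int id;
+ *	uint64_t value = rte_event_dev_xstats_by_name_get(dev_id,
+ *			"dev_rx", &id);
+ */
+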
+int
+rte_event_dev_xstats_reset(uint8_t dev_id,
+		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
+		const uint32_t ids[], uint32_t nb_ids)
+{
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ if (dev->dev_ops->xstats_reset != NULL)
+ return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
+ ids, nb_ids);
+ return -ENOTSUP;
+}
+
int
rte_event_dev_start(uint8_t dev_id)
{
return (*dev->dev_ops->dev_close)(dev);
}
+
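+/*
+ * Reserve (primary process) or look up (secondary process) the memzone
+ * backing a device's rte_eventdev_data, so both process types resolve
+ * the same shared device data.
+ */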
+static inline int
+rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
+ int socket_id)
+{
+ char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
+ const struct rte_memzone *mz;
+ int n;
+
+ /* Generate memzone name */
+ n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
+ if (n >= (int)sizeof(mz_name))
+ return -EINVAL;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ mz = rte_memzone_reserve(mz_name,
+ sizeof(struct rte_eventdev_data),
+ socket_id, 0);
+	} else {
+		mz = rte_memzone_lookup(mz_name);
+	}
+
+ if (mz == NULL)
+ return -ENOMEM;
+
+ *data = mz->addr;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ memset(*data, 0, sizeof(struct rte_eventdev_data));
+
+ return 0;
+}
+
+static inline uint8_t
+rte_eventdev_find_free_device_index(void)
+{
+ uint8_t dev_id;
+
+ for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
+ if (rte_eventdevs[dev_id].attached ==
+ RTE_EVENTDEV_DETACHED)
+ return dev_id;
+ }
+ return RTE_EVENT_MAX_DEVS;
+}
+
+struct rte_eventdev *
+rte_event_pmd_allocate(const char *name, int socket_id)
+{
+ struct rte_eventdev *eventdev;
+ uint8_t dev_id;
+
+ if (rte_event_pmd_get_named_dev(name) != NULL) {
+ RTE_EDEV_LOG_ERR("Event device with name %s already "
+ "allocated!", name);
+ return NULL;
+ }
+
+ dev_id = rte_eventdev_find_free_device_index();
+ if (dev_id == RTE_EVENT_MAX_DEVS) {
+ RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
+ return NULL;
+ }
+
+ eventdev = &rte_eventdevs[dev_id];
+
+ if (eventdev->data == NULL) {
+ struct rte_eventdev_data *eventdev_data = NULL;
+
+ int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
+ socket_id);
+
+ if (retval < 0 || eventdev_data == NULL)
+ return NULL;
+
+ eventdev->data = eventdev_data;
+
+ snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
+ "%s", name);
+
+ eventdev->data->dev_id = dev_id;
+ eventdev->data->socket_id = socket_id;
+ eventdev->data->dev_started = 0;
+
+ eventdev->attached = RTE_EVENTDEV_ATTACHED;
+
+ eventdev_globals.nb_devs++;
+ }
+
+ return eventdev;
+}
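+
+/*
+ * Driver-side usage sketch ("event_skeleton" and skeleton_eventdev_ops
+ * are hypothetical):
+ *
+ *	struct rte_eventdev *dev;
+ *
+ *	dev = rte_event_pmd_allocate("event_skeleton", rte_socket_id());
+ *	if (dev == NULL)
+ *		return -ENOMEM;
+ *	dev->dev_ops = &skeleton_eventdev_ops;
+ */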
+
+int
+rte_event_pmd_release(struct rte_eventdev *eventdev)
+{
+ int ret;
+ char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
+ const struct rte_memzone *mz;
+
+ if (eventdev == NULL)
+ return -EINVAL;
+
+ eventdev->attached = RTE_EVENTDEV_DETACHED;
+ eventdev_globals.nb_devs--;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ rte_free(eventdev->data->dev_private);
+
+ /* Generate memzone name */
+ ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
+ eventdev->data->dev_id);
+ if (ret >= (int)sizeof(mz_name))
+ return -EINVAL;
+
+ mz = rte_memzone_lookup(mz_name);
+ if (mz == NULL)
+ return -ENOMEM;
+
+ ret = rte_memzone_free(mz);
+ if (ret)
+ return ret;
+ }
+
+ eventdev->data = NULL;
+ return 0;
+}