X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eventdev%2Frte_eventdev.c;h=851a1190c002d2ea0c7c5194125ba6a6a1a2663a;hb=a6ec31597a0b1f6b6ddf21546f76bbb280f62c37;hp=b13eb00ce2300fe656772e5d0c796bd1cbb48678;hpb=4f0804bbdfb98a4becc6647909b782b784817009;p=dpdk.git

diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
index b13eb00ce2..851a1190c0 100644
--- a/lib/librte_eventdev/rte_eventdev.c
+++ b/lib/librte_eventdev/rte_eventdev.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2016 Cavium networks. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Cavium networks nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
  */
 
 #include <ctype.h>
@@ -45,7 +17,6 @@
 #include <rte_log.h>
 #include <rte_debug.h>
 #include <rte_dev.h>
-#include <rte_pci.h>
 #include <rte_memory.h>
 #include <rte_memcpy.h>
 #include <rte_memzone.h>
@@ -57,6 +28,7 @@
 #include <rte_common.h>
 #include <rte_malloc.h>
 #include <rte_errno.h>
+#include <rte_ethdev.h>
 
 #include "rte_eventdev.h"
 #include "rte_eventdev_pmd.h"
@@ -125,59 +97,81 @@ rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
 	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
 
-	dev_info->pci_dev = dev->pci_dev;
+	dev_info->dev = dev->dev;
 	return 0;
 }
 
+int
+rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
+				uint32_t *caps)
+{
+	struct rte_eventdev *dev;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
+
+	dev = &rte_eventdevs[dev_id];
+
+	if (caps == NULL)
+		return -EINVAL;
+	*caps = 0;
+
+	return dev->dev_ops->eth_rx_adapter_caps_get ?
+			(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
+						&rte_eth_devices[eth_port_id],
+						caps)
+			: 0;
+}
+
 static inline int
 rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 {
 	uint8_t old_nb_queues = dev->data->nb_queues;
-	uint8_t *queues_prio;
+	struct rte_event_queue_conf *queues_cfg;
 	unsigned int i;
 
 	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			 dev->data->dev_id);
 
 	/* First time configuration */
-	if (dev->data->queues_prio == NULL && nb_queues != 0) {
-		/* Allocate memory to store queue priority */
-		dev->data->queues_prio = rte_zmalloc_socket(
-				"eventdev->data->queues_prio",
-				sizeof(dev->data->queues_prio[0]) * nb_queues,
+	if (dev->data->queues_cfg == NULL && nb_queues != 0) {
+		/* Allocate memory to store queue configuration */
+		dev->data->queues_cfg = rte_zmalloc_socket(
+				"eventdev->data->queues_cfg",
+				sizeof(dev->data->queues_cfg[0]) * nb_queues,
 				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-		if (dev->data->queues_prio == NULL) {
+		if (dev->data->queues_cfg == NULL) {
 			dev->data->nb_queues = 0;
-			RTE_EDEV_LOG_ERR("failed to get mem for queue priority,"
+			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
 					"nb_queues %u", nb_queues);
 			return -(ENOMEM);
 		}
 	/* Re-configure */
-	} else if (dev->data->queues_prio != NULL && nb_queues != 0) {
+	} else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
 
 		for (i = nb_queues; i < old_nb_queues; i++)
 			(*dev->dev_ops->queue_release)(dev, i);
 
-		/* Re allocate memory to store queue priority */
-		queues_prio = dev->data->queues_prio;
-		queues_prio = rte_realloc(queues_prio,
-				sizeof(queues_prio[0]) * nb_queues,
+		/* Re allocate memory to store queue configuration */
+		queues_cfg = dev->data->queues_cfg;
+		queues_cfg = rte_realloc(queues_cfg,
+				sizeof(queues_cfg[0]) * nb_queues,
 				RTE_CACHE_LINE_SIZE);
-		if (queues_prio == NULL) {
-			RTE_EDEV_LOG_ERR("failed to realloc queue priority,"
+		if (queues_cfg == NULL) {
+			RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
 					" nb_queues %u", nb_queues);
 			return -(ENOMEM);
 		}
-		dev->data->queues_prio = queues_prio;
+		dev->data->queues_cfg = queues_cfg;
 
 		if (nb_queues > old_nb_queues) {
 			uint8_t new_qs = nb_queues - old_nb_queues;
 
-			memset(queues_prio + old_nb_queues, 0,
-				sizeof(queues_prio[0]) * new_qs);
+			memset(queues_cfg + old_nb_queues, 0,
+				sizeof(queues_cfg[0]) * new_qs);
 		}
-	} else if (dev->data->queues_prio != NULL && nb_queues == 0) {
+	} else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
 
 		for (i = nb_queues; i < old_nb_queues; i++)
@@ -188,14 +182,15 @@ rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 	return 0;
 }
 
+#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
+
 static inline int
 rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 {
 	uint8_t old_nb_ports = dev->data->nb_ports;
 	void **ports;
 	uint16_t *links_map;
-	uint8_t *ports_dequeue_depth;
-	uint8_t *ports_enqueue_depth;
+	struct rte_event_port_conf *ports_cfg;
 	unsigned int i;
 
 	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
@@ -213,26 +208,14 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 		return -(ENOMEM);
 	}
 
-	/* Allocate memory to store ports dequeue depth */
-	dev->data->ports_dequeue_depth =
-		rte_zmalloc_socket("eventdev->ports_dequeue_depth",
-		sizeof(dev->data->ports_dequeue_depth[0]) * nb_ports,
-		RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-	if (dev->data->ports_dequeue_depth == NULL) {
-		dev->data->nb_ports = 0;
-		RTE_EDEV_LOG_ERR("failed to get mem for port deq meta,"
-				"nb_ports %u", nb_ports);
-		return -(ENOMEM);
-	}
-
-	/* Allocate memory to store ports enqueue depth */
-	dev->data->ports_enqueue_depth =
-		rte_zmalloc_socket("eventdev->ports_enqueue_depth",
-		sizeof(dev->data->ports_enqueue_depth[0]) * nb_ports,
+	/* Allocate memory to store port configurations */
+	dev->data->ports_cfg =
+		rte_zmalloc_socket("eventdev->ports_cfg",
+		sizeof(dev->data->ports_cfg[0]) * nb_ports,
 		RTE_CACHE_LINE_SIZE, dev->data->socket_id);
-	if (dev->data->ports_enqueue_depth == NULL) {
+	if (dev->data->ports_cfg == NULL) {
 		dev->data->nb_ports = 0;
-		RTE_EDEV_LOG_ERR("failed to get mem for port enq meta,"
+		RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
 				"nb_ports %u", nb_ports);
 		return -(ENOMEM);
 	}
@@ -249,12 +232,14 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 				"nb_ports %u", nb_ports);
 			return -(ENOMEM);
 		}
+		for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
+			dev->data->links_map[i] =
+				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
 	} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
 
 		ports = dev->data->ports;
-		ports_dequeue_depth = dev->data->ports_dequeue_depth;
-		ports_enqueue_depth = dev->data->ports_enqueue_depth;
+		ports_cfg = dev->data->ports_cfg;
 		links_map = dev->data->links_map;
 
 		for (i = nb_ports; i < old_nb_ports; i++)
@@ -269,22 +254,12 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 			return -(ENOMEM);
 		}
 
-		/* Realloc memory for ports_dequeue_depth */
-		ports_dequeue_depth = rte_realloc(ports_dequeue_depth,
-			sizeof(ports_dequeue_depth[0]) * nb_ports,
+		/* Realloc memory for ports_cfg */
+		ports_cfg = rte_realloc(ports_cfg,
+			sizeof(ports_cfg[0]) * nb_ports,
 			RTE_CACHE_LINE_SIZE);
-		if (ports_dequeue_depth == NULL) {
-			RTE_EDEV_LOG_ERR("failed to realloc port dequeue meta,"
-					" nb_ports %u", nb_ports);
-			return -(ENOMEM);
-		}
-
-		/* Realloc memory for ports_enqueue_depth */
-		ports_enqueue_depth = rte_realloc(ports_enqueue_depth,
-			sizeof(ports_enqueue_depth[0]) * nb_ports,
-			RTE_CACHE_LINE_SIZE);
-		if (ports_enqueue_depth == NULL) {
-			RTE_EDEV_LOG_ERR("failed to realloc port enqueue meta,"
+		if (ports_cfg == NULL) {
+			RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
 					" nb_ports %u", nb_ports);
 			return -(ENOMEM);
 		}
@@ -294,7 +269,7 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 			sizeof(dev->data->links_map[0]) * nb_ports *
 			RTE_EVENT_MAX_QUEUES_PER_DEV,
 			RTE_CACHE_LINE_SIZE);
-		if (dev->data->links_map == NULL) {
+		if (links_map == NULL) {
 			dev->data->nb_ports = 0;
 			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
 					"nb_ports %u", nb_ports);
@@ -303,21 +278,22 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 
 		if (nb_ports > old_nb_ports) {
 			uint8_t new_ps = nb_ports - old_nb_ports;
+			unsigned int old_links_map_end =
+				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
+			unsigned int links_map_end =
+				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
 
 			memset(ports + old_nb_ports, 0,
 				sizeof(ports[0]) * new_ps);
-			memset(ports_dequeue_depth + old_nb_ports, 0,
-				sizeof(ports_dequeue_depth[0]) * new_ps);
-			memset(ports_enqueue_depth + old_nb_ports, 0,
-				sizeof(ports_enqueue_depth[0]) * new_ps);
-			memset(links_map +
-				(old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV),
-				0, sizeof(ports_enqueue_depth[0]) * new_ps);
+			memset(ports_cfg + old_nb_ports, 0,
+				sizeof(ports_cfg[0]) * new_ps);
+			for (i = old_links_map_end; i < links_map_end; i++)
+				links_map[i] =
+					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
 		}
 
 		dev->data->ports = ports;
-		dev->data->ports_dequeue_depth = ports_dequeue_depth;
-		dev->data->ports_enqueue_depth = ports_enqueue_depth;
+		dev->data->ports_cfg = ports_cfg;
 		dev->data->links_map = links_map;
 	} else if (dev->data->ports != NULL && nb_ports == 0) {
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
@@ -357,10 +333,11 @@ rte_event_dev_configure(uint8_t dev_id,
 	(*dev->dev_ops->dev_infos_get)(dev, &info);
 
 	/* Check dequeue_timeout_ns value is in limit */
-	if (!dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
-		if (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
+	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
+		if (dev_conf->dequeue_timeout_ns &&
+		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
 			|| dev_conf->dequeue_timeout_ns >
-				info.max_dequeue_timeout_ns) {
+				info.max_dequeue_timeout_ns)) {
 			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
 			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
 			dev_id, dev_conf->dequeue_timeout_ns,
@@ -418,8 +395,9 @@ rte_event_dev_configure(uint8_t dev_id,
 			dev_id);
 		return -EINVAL;
 	}
-	if (dev_conf->nb_event_port_dequeue_depth >
-			info.max_event_port_dequeue_depth) {
+	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
+		 (dev_conf->nb_event_port_dequeue_depth >
+			 info.max_event_port_dequeue_depth)) {
 		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
 		dev_id, dev_conf->nb_event_port_dequeue_depth,
 		info.max_event_port_dequeue_depth);
@@ -432,8 +410,9 @@ rte_event_dev_configure(uint8_t dev_id,
 			dev_id);
 		return -EINVAL;
 	}
-	if (dev_conf->nb_event_port_enqueue_depth >
-			info.max_event_port_enqueue_depth) {
+	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
+		(dev_conf->nb_event_port_enqueue_depth >
+			info.max_event_port_enqueue_depth)) {
 		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
 		dev_id, dev_conf->nb_event_port_enqueue_depth,
 		info.max_event_port_enqueue_depth);
@@ -508,13 +487,13 @@ rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
 static inline int
 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
 {
-	if (queue_conf && (
+	if (queue_conf &&
+		!(queue_conf->event_queue_cfg &
+		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
 		((queue_conf->event_queue_cfg &
-			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
-			== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
-		((queue_conf->event_queue_cfg &
-			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
-			== RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY)
+		 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+		(queue_conf->schedule_type
+			== RTE_SCHED_TYPE_ATOMIC)
 		))
 		return 1;
 	else
@@ -524,13 +503,13 @@ is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
 static inline int
 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
 {
-	if (queue_conf && (
-		((queue_conf->event_queue_cfg &
-			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
-			== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+	if (queue_conf &&
+		!(queue_conf->event_queue_cfg &
+		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
 		((queue_conf->event_queue_cfg &
-			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
-			== RTE_EVENT_QUEUE_CFG_ORDERED_ONLY)
+		 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+		(queue_conf->schedule_type
+			== RTE_SCHED_TYPE_ORDERED)
 		))
 		return 1;
 	else
@@ -591,35 +570,13 @@ rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
 		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
-		def_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_DEFAULT;
 		queue_conf = &def_conf;
 	}
 
-	dev->data->queues_prio[queue_id] = queue_conf->priority;
+	dev->data->queues_cfg[queue_id] = *queue_conf;
 	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
 }
 
-uint8_t
-rte_event_queue_count(uint8_t dev_id)
-{
-	struct rte_eventdev *dev;
-
-	dev = &rte_eventdevs[dev_id];
-	return dev->data->nb_queues;
-}
-
-uint8_t
-rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id)
-{
-	struct rte_eventdev *dev;
-
-	dev = &rte_eventdevs[dev_id];
-	if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
-		return dev->data->queues_prio[queue_id];
-	else
-		return RTE_EVENT_DEV_PRIORITY_NORMAL;
-}
-
 static inline int
 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
 {
@@ -701,6 +658,15 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
 		return -EINVAL;
 	}
 
+	if (port_conf && port_conf->disable_implicit_release &&
+	    !(dev->data->event_dev_cap &
+	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
+		RTE_EDEV_LOG_ERR(
+		   "dev%d port%d Implicit release disable not supported",
+			dev_id, port_id);
+		return -EINVAL;
+	}
+
 	if (dev->data->dev_started) {
 		RTE_EDEV_LOG_ERR(
 		    "device %d must be stopped to allow port setup", dev_id);
@@ -716,10 +682,7 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
 		port_conf = &def_conf;
 	}
 
-	dev->data->ports_dequeue_depth[port_id] =
-			port_conf->dequeue_depth;
-	dev->data->ports_enqueue_depth[port_id] =
-			port_conf->enqueue_depth;
+	dev->data->ports_cfg[port_id] = *port_conf;
 
 	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
 
@@ -733,31 +696,110 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
 	return 0;
 }
 
-uint8_t
-rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id)
+int
+rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
+		       uint32_t *attr_value)
 {
 	struct rte_eventdev *dev;
 
+	if (!attr_value)
+		return -EINVAL;
+
 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
 	dev = &rte_eventdevs[dev_id];
-	return dev->data->ports_dequeue_depth[port_id];
+
+	switch (attr_id) {
+	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
+		*attr_value = dev->data->nb_ports;
+		break;
+	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
+		*attr_value = dev->data->nb_queues;
+		break;
+	case RTE_EVENT_DEV_ATTR_STARTED:
+		*attr_value = dev->data->dev_started;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
-uint8_t
-rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id)
+int
+rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
+			uint32_t *attr_value)
 {
 	struct rte_eventdev *dev;
 
+	if (!attr_value)
+		return -EINVAL;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
 	dev = &rte_eventdevs[dev_id];
-	return dev->data->ports_enqueue_depth[port_id];
+	if (!is_valid_port(dev, port_id)) {
+		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+		return -EINVAL;
+	}
+
+	switch (attr_id) {
+	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
+		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
+		break;
+	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
+		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
+		break;
+	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
+		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
+		break;
+	default:
+		return -EINVAL;
+	};
+	return 0;
 }
 
-uint8_t
-rte_event_port_count(uint8_t dev_id)
+int
+rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
+			uint32_t *attr_value)
 {
+	struct rte_event_queue_conf *conf;
 	struct rte_eventdev *dev;
 
+	if (!attr_value)
+		return -EINVAL;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
 	dev = &rte_eventdevs[dev_id];
-	return dev->data->nb_ports;
+	if (!is_valid_queue(dev, queue_id)) {
+		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
+		return -EINVAL;
+	}
+
+	conf = &dev->data->queues_cfg[queue_id];
+
+	switch (attr_id) {
+	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
+		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
+		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
+			*attr_value = conf->priority;
+		break;
+	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
+		*attr_value = conf->nb_atomic_flows;
+		break;
+	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
+		*attr_value = conf->nb_atomic_order_sequences;
+		break;
+	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
+		*attr_value = conf->event_queue_cfg;
+		break;
+	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
+		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
+			return -EOVERFLOW;
+
+		*attr_value = conf->schedule_type;
+		break;
+	default:
+		return -EINVAL;
+	};
+	return 0;
 }
 
 int
@@ -771,13 +813,19 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id,
 	uint16_t *links_map;
 	int i, diag;
 
-	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
 	dev = &rte_eventdevs[dev_id];
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_link, -ENOTSUP);
+
+	if (*dev->dev_ops->port_link == NULL) {
+		RTE_PMD_DEBUG_TRACE("Function not supported\n");
+		rte_errno = -ENOTSUP;
+		return 0;
+	}
 
 	if (!is_valid_port(dev, port_id)) {
 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
-		return -EINVAL;
+		rte_errno = -EINVAL;
+		return 0;
 	}
 
 	if (queues == NULL) {
@@ -796,11 +844,13 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id,
 	}
 
 	for (i = 0; i < nb_links; i++)
-		if (queues[i] >= RTE_EVENT_MAX_QUEUES_PER_DEV)
-			return -EINVAL;
+		if (queues[i] >= dev->data->nb_queues) {
+			rte_errno = -EINVAL;
+			return 0;
+		}
 
-	diag = (*dev->dev_ops->port_link)(dev->data->ports[port_id], queues,
-						priorities, nb_links);
+	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
+						queues, priorities, nb_links);
 	if (diag < 0)
 		return diag;
 
@@ -813,46 +863,65 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id,
 	return diag;
 }
 
-#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
-
 int
 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
 		      uint8_t queues[], uint16_t nb_unlinks)
 {
 	struct rte_eventdev *dev;
 	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
-	int i, diag;
+	int i, diag, j;
 	uint16_t *links_map;
 
-	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
 	dev = &rte_eventdevs[dev_id];
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlink, -ENOTSUP);
+
+	if (*dev->dev_ops->port_unlink == NULL) {
+		RTE_PMD_DEBUG_TRACE("Function not supported\n");
+		rte_errno = -ENOTSUP;
+		return 0;
+	}
 
 	if (!is_valid_port(dev, port_id)) {
 		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
-		return -EINVAL;
+		rte_errno = -EINVAL;
+		return 0;
 	}
 
+	links_map = dev->data->links_map;
+	/* Point links_map to this port specific area */
+	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
+
 	if (queues == NULL) {
-		for (i = 0; i < dev->data->nb_queues; i++)
-			all_queues[i] = i;
+		j = 0;
+		for (i = 0; i < dev->data->nb_queues; i++) {
+			if (links_map[i] !=
+					EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
+				all_queues[j] = i;
+				j++;
+			}
+		}
 		queues = all_queues;
-		nb_unlinks = dev->data->nb_queues;
+	} else {
+		for (j = 0; j < nb_unlinks; j++) {
+			if (links_map[queues[j]] ==
+					EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
+				break;
+		}
 	}
 
+	nb_unlinks = j;
 	for (i = 0; i < nb_unlinks; i++)
-		if (queues[i] >= RTE_EVENT_MAX_QUEUES_PER_DEV)
-			return -EINVAL;
+		if (queues[i] >= dev->data->nb_queues) {
+			rte_errno = -EINVAL;
+			return 0;
+		}
 
-	diag = (*dev->dev_ops->port_unlink)(dev->data->ports[port_id], queues,
-					nb_unlinks);
+	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
+					queues, nb_unlinks);
 
 	if (diag < 0)
 		return diag;
 
-	links_map = dev->data->links_map;
-	/* Point links_map to this port specific area */
-	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
 	for (i = 0; i < diag; i++)
 		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
 
@@ -877,7 +946,7 @@ rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
 	links_map = dev->data->links_map;
 	/* Point links_map to this port specific area */
 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
-	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+	for (i = 0; i < dev->data->nb_queues; i++) {
 		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
 			queues[count] = i;
 			priorities[count] = (uint8_t)links_map[i];
@@ -900,8 +969,24 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
 	if (timeout_ticks == NULL)
 		return -EINVAL;
 
-	(*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
-	return 0;
+	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
+}
+
+int
+rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
+{
+	struct rte_eventdev *dev;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_eventdevs[dev_id];
+
+	if (service_id == NULL)
+		return -EINVAL;
+
+	if (dev->data->service_inited)
+		*service_id = dev->data->service_id;
+
+	return dev->data->service_inited ? 0 : -ESRCH;
 }
 
 int
@@ -918,6 +1003,99 @@ rte_event_dev_dump(uint8_t dev_id, FILE *f)
 
 }
 
+static int
+xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
+		uint8_t queue_port_id)
+{
+	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	if (dev->dev_ops->xstats_get_names != NULL)
+		return (*dev->dev_ops->xstats_get_names)(dev, mode,
+							queue_port_id,
+							NULL, NULL, 0);
+	return 0;
+}
+
+int
+rte_event_dev_xstats_names_get(uint8_t dev_id,
+		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+		struct rte_event_dev_xstats_name *xstats_names,
+		unsigned int *ids, unsigned int size)
+{
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
+	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
+							  queue_port_id);
+	if (xstats_names == NULL || cnt_expected_entries < 0 ||
+			(int)size < cnt_expected_entries)
+		return cnt_expected_entries;
+
+	/* dev_id checked above */
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	if (dev->dev_ops->xstats_get_names != NULL)
+		return (*dev->dev_ops->xstats_get_names)(dev, mode,
+				queue_port_id, xstats_names, ids, size);
+
+	return -ENOTSUP;
+}
+
+/* retrieve eventdev extended statistics */
+int
+rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
+		uint8_t queue_port_id, const unsigned int ids[],
+		uint64_t values[], unsigned int n)
+{
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	/* implemented by the driver */
+	if (dev->dev_ops->xstats_get != NULL)
+		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
+							ids, values, n);
+	return -ENOTSUP;
+}
+
+uint64_t
+rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
+		unsigned int *id)
+{
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	unsigned int temp = -1;
+
+	if (id != NULL)
+		*id = (unsigned int)-1;
+	else
+		id = &temp; /* ensure driver never gets a NULL value */
+
+	/* implemented by driver */
+	if (dev->dev_ops->xstats_get_by_name != NULL)
+		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
+	return -ENOTSUP;
+}
+
+int rte_event_dev_xstats_reset(uint8_t dev_id,
+		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
+		const uint32_t ids[], uint32_t nb_ids)
+{
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	if (dev->dev_ops->xstats_reset != NULL)
+		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
+							ids, nb_ids);
+	return -ENOTSUP;
+}
+
+int rte_event_dev_selftest(uint8_t dev_id)
+{
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	if (dev->dev_ops->dev_selftest != NULL)
+		return (*dev->dev_ops->dev_selftest)();
+	return -ENOTSUP;
+}
+
 int
 rte_event_dev_start(uint8_t dev_id)
 {
@@ -984,3 +1162,127 @@ rte_event_dev_close(uint8_t dev_id)
 
 	return (*dev->dev_ops->dev_close)(dev);
 }
+
+static inline int
+rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
+		int socket_id)
+{
+	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
+	const struct rte_memzone *mz;
+	int n;
+
+	/* Generate memzone name */
+	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
+	if (n >= (int)sizeof(mz_name))
+		return -EINVAL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		mz = rte_memzone_reserve(mz_name,
+				sizeof(struct rte_eventdev_data),
+				socket_id, 0);
+	} else
+		mz = rte_memzone_lookup(mz_name);
+
+	if (mz == NULL)
+		return -ENOMEM;
+
+	*data = mz->addr;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(*data, 0, sizeof(struct rte_eventdev_data));
+
+	return 0;
+}
+
+static inline uint8_t
+rte_eventdev_find_free_device_index(void)
+{
+	uint8_t dev_id;
+
+	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
+		if (rte_eventdevs[dev_id].attached ==
+				RTE_EVENTDEV_DETACHED)
+			return dev_id;
+	}
+	return RTE_EVENT_MAX_DEVS;
+}
+
+struct rte_eventdev *
+rte_event_pmd_allocate(const char *name, int socket_id)
+{
+	struct rte_eventdev *eventdev;
+	uint8_t dev_id;
+
+	if (rte_event_pmd_get_named_dev(name) != NULL) {
+		RTE_EDEV_LOG_ERR("Event device with name %s already "
+				"allocated!", name);
+		return NULL;
+	}
+
+	dev_id = rte_eventdev_find_free_device_index();
+	if (dev_id == RTE_EVENT_MAX_DEVS) {
+		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
+		return NULL;
+	}
+
+	eventdev = &rte_eventdevs[dev_id];
+
+	if (eventdev->data == NULL) {
+		struct rte_eventdev_data *eventdev_data = NULL;
+
+		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
+				socket_id);
+
+		if (retval < 0 || eventdev_data == NULL)
+			return NULL;
+
+		eventdev->data = eventdev_data;
+
+		snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
+				"%s", name);
+
+		eventdev->data->dev_id = dev_id;
+		eventdev->data->socket_id = socket_id;
+		eventdev->data->dev_started = 0;
+
+		eventdev->attached = RTE_EVENTDEV_ATTACHED;
+
+		eventdev_globals.nb_devs++;
+	}
+
+	return eventdev;
+}
+
+int
+rte_event_pmd_release(struct rte_eventdev *eventdev)
+{
+	int ret;
+	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
+	const struct rte_memzone *mz;
+
+	if (eventdev == NULL)
+		return -EINVAL;
+
+	eventdev->attached = RTE_EVENTDEV_DETACHED;
+	eventdev_globals.nb_devs--;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		rte_free(eventdev->data->dev_private);
+
+		/* Generate memzone name */
+		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
+				eventdev->data->dev_id);
+		if (ret >= (int)sizeof(mz_name))
+			return -EINVAL;
+
+		mz = rte_memzone_lookup(mz_name);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		ret = rte_memzone_free(mz);
+		if (ret)
+			return ret;
+	}
+
+	eventdev->data = NULL;
+	return 0;
+}