diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
index fd0406747e..557198f4a1 100644
--- a/lib/librte_eventdev/rte_eventdev.c
+++ b/lib/librte_eventdev/rte_eventdev.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2016 Cavium networks. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
  */
 
 #include 
@@ -41,6 +13,7 @@
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -56,42 +29,50 @@
 #include 
 #include 
 #include 
+#include 
+#include 
+#include 
+#include 
 
 #include "rte_eventdev.h"
 #include "rte_eventdev_pmd.h"
+#include "rte_eventdev_trace.h"
 
-struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
+static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
 
-struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];
+struct rte_eventdev *rte_eventdevs = rte_event_devices;
 
 static struct rte_eventdev_global eventdev_globals = {
 	.nb_devs		= 0
 };
 
-struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;
-
 /* Event dev north bound API implementation */
 
 uint8_t
 rte_event_dev_count(void)
 {
-	return rte_eventdev_globals->nb_devs;
+	return eventdev_globals.nb_devs;
 }
 
 int
 rte_event_dev_get_dev_id(const char *name)
 {
 	int i;
+	uint8_t cmp;
 
 	if (!name)
 		return -EINVAL;
 
-	for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
-		if ((strcmp(rte_event_devices[i].data->name, name)
-				== 0) &&
-				(rte_event_devices[i].attached ==
-						RTE_EVENTDEV_ATTACHED))
+	for (i = 0; i < eventdev_globals.nb_devs; i++) {
+		cmp = (strncmp(rte_event_devices[i].data->name, name,
+				RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
+			(rte_event_devices[i].dev ? 
(strncmp( + rte_event_devices[i].dev->driver->name, name, + RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0); + if (cmp && (rte_event_devices[i].attached == + RTE_EVENTDEV_ATTACHED)) return i; + } return -ENODEV; } @@ -128,55 +109,147 @@ rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info) return 0; } +int +rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id, + uint32_t *caps) +{ + struct rte_eventdev *dev; + + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL); + + dev = &rte_eventdevs[dev_id]; + + if (caps == NULL) + return -EINVAL; + *caps = 0; + + return dev->dev_ops->eth_rx_adapter_caps_get ? + (*dev->dev_ops->eth_rx_adapter_caps_get)(dev, + &rte_eth_devices[eth_port_id], + caps) + : 0; +} + +int +rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps) +{ + struct rte_eventdev *dev; + const struct rte_event_timer_adapter_ops *ops; + + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + + dev = &rte_eventdevs[dev_id]; + + if (caps == NULL) + return -EINVAL; + *caps = 0; + + return dev->dev_ops->timer_adapter_caps_get ? + (*dev->dev_ops->timer_adapter_caps_get)(dev, + 0, + caps, + &ops) + : 0; +} + +int +rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id, + uint32_t *caps) +{ + struct rte_eventdev *dev; + struct rte_cryptodev *cdev; + + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) + return -EINVAL; + + dev = &rte_eventdevs[dev_id]; + cdev = rte_cryptodev_pmd_get_dev(cdev_id); + + if (caps == NULL) + return -EINVAL; + *caps = 0; + + return dev->dev_ops->crypto_adapter_caps_get ? + (*dev->dev_ops->crypto_adapter_caps_get) + (dev, cdev, caps) : -ENOTSUP; +} + +int +rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id, + uint32_t *caps) +{ + struct rte_eventdev *dev; + struct rte_eth_dev *eth_dev; + + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL); + + dev = &rte_eventdevs[dev_id]; + eth_dev = &rte_eth_devices[eth_port_id]; + + if (caps == NULL) + return -EINVAL; + + *caps = 0; + + return dev->dev_ops->eth_tx_adapter_caps_get ? 
+ (*dev->dev_ops->eth_tx_adapter_caps_get)(dev, + eth_dev, + caps) + : 0; +} + static inline int rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues) { uint8_t old_nb_queues = dev->data->nb_queues; - uint8_t *queues_prio; + struct rte_event_queue_conf *queues_cfg; unsigned int i; RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues, dev->data->dev_id); /* First time configuration */ - if (dev->data->queues_prio == NULL && nb_queues != 0) { - /* Allocate memory to store queue priority */ - dev->data->queues_prio = rte_zmalloc_socket( - "eventdev->data->queues_prio", - sizeof(dev->data->queues_prio[0]) * nb_queues, + if (dev->data->queues_cfg == NULL && nb_queues != 0) { + /* Allocate memory to store queue configuration */ + dev->data->queues_cfg = rte_zmalloc_socket( + "eventdev->data->queues_cfg", + sizeof(dev->data->queues_cfg[0]) * nb_queues, RTE_CACHE_LINE_SIZE, dev->data->socket_id); - if (dev->data->queues_prio == NULL) { + if (dev->data->queues_cfg == NULL) { dev->data->nb_queues = 0; - RTE_EDEV_LOG_ERR("failed to get mem for queue priority," + RTE_EDEV_LOG_ERR("failed to get mem for queue cfg," "nb_queues %u", nb_queues); return -(ENOMEM); } /* Re-configure */ - } else if (dev->data->queues_prio != NULL && nb_queues != 0) { + } else if (dev->data->queues_cfg != NULL && nb_queues != 0) { RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP); for (i = nb_queues; i < old_nb_queues; i++) (*dev->dev_ops->queue_release)(dev, i); - /* Re allocate memory to store queue priority */ - queues_prio = dev->data->queues_prio; - queues_prio = rte_realloc(queues_prio, - sizeof(queues_prio[0]) * nb_queues, + /* Re allocate memory to store queue configuration */ + queues_cfg = dev->data->queues_cfg; + queues_cfg = rte_realloc(queues_cfg, + sizeof(queues_cfg[0]) * nb_queues, RTE_CACHE_LINE_SIZE); - if (queues_prio == NULL) { - RTE_EDEV_LOG_ERR("failed to realloc queue priority," + if (queues_cfg == NULL) { + RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory," " nb_queues %u", nb_queues); return -(ENOMEM); } - dev->data->queues_prio = queues_prio; + dev->data->queues_cfg = queues_cfg; if (nb_queues > old_nb_queues) { uint8_t new_qs = nb_queues - old_nb_queues; - memset(queues_prio + old_nb_queues, 0, - sizeof(queues_prio[0]) * new_qs); + memset(queues_cfg + old_nb_queues, 0, + sizeof(queues_cfg[0]) * new_qs); } - } else if (dev->data->queues_prio != NULL && nb_queues == 0) { + } else if (dev->data->queues_cfg != NULL && nb_queues == 0) { RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP); for (i = nb_queues; i < old_nb_queues; i++) @@ -195,8 +268,7 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports) uint8_t old_nb_ports = dev->data->nb_ports; void **ports; uint16_t *links_map; - uint8_t *ports_dequeue_depth; - uint8_t *ports_enqueue_depth; + struct rte_event_port_conf *ports_cfg; unsigned int i; RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports, @@ -214,26 +286,14 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports) return -(ENOMEM); } - /* Allocate memory to store ports dequeue depth */ - dev->data->ports_dequeue_depth = - rte_zmalloc_socket("eventdev->ports_dequeue_depth", - sizeof(dev->data->ports_dequeue_depth[0]) * nb_ports, + /* Allocate memory to store port configurations */ + dev->data->ports_cfg = + rte_zmalloc_socket("eventdev->ports_cfg", + sizeof(dev->data->ports_cfg[0]) * nb_ports, RTE_CACHE_LINE_SIZE, dev->data->socket_id); - if (dev->data->ports_dequeue_depth == NULL) { + if 
(dev->data->ports_cfg == NULL) { dev->data->nb_ports = 0; - RTE_EDEV_LOG_ERR("failed to get mem for port deq meta," - "nb_ports %u", nb_ports); - return -(ENOMEM); - } - - /* Allocate memory to store ports enqueue depth */ - dev->data->ports_enqueue_depth = - rte_zmalloc_socket("eventdev->ports_enqueue_depth", - sizeof(dev->data->ports_enqueue_depth[0]) * nb_ports, - RTE_CACHE_LINE_SIZE, dev->data->socket_id); - if (dev->data->ports_enqueue_depth == NULL) { - dev->data->nb_ports = 0; - RTE_EDEV_LOG_ERR("failed to get mem for port enq meta," + RTE_EDEV_LOG_ERR("failed to get mem for port cfg," "nb_ports %u", nb_ports); return -(ENOMEM); } @@ -257,8 +317,7 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports) RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP); ports = dev->data->ports; - ports_dequeue_depth = dev->data->ports_dequeue_depth; - ports_enqueue_depth = dev->data->ports_enqueue_depth; + ports_cfg = dev->data->ports_cfg; links_map = dev->data->links_map; for (i = nb_ports; i < old_nb_ports; i++) @@ -273,22 +332,12 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports) return -(ENOMEM); } - /* Realloc memory for ports_dequeue_depth */ - ports_dequeue_depth = rte_realloc(ports_dequeue_depth, - sizeof(ports_dequeue_depth[0]) * nb_ports, + /* Realloc memory for ports_cfg */ + ports_cfg = rte_realloc(ports_cfg, + sizeof(ports_cfg[0]) * nb_ports, RTE_CACHE_LINE_SIZE); - if (ports_dequeue_depth == NULL) { - RTE_EDEV_LOG_ERR("failed to realloc port dequeue meta," - " nb_ports %u", nb_ports); - return -(ENOMEM); - } - - /* Realloc memory for ports_enqueue_depth */ - ports_enqueue_depth = rte_realloc(ports_enqueue_depth, - sizeof(ports_enqueue_depth[0]) * nb_ports, - RTE_CACHE_LINE_SIZE); - if (ports_enqueue_depth == NULL) { - RTE_EDEV_LOG_ERR("failed to realloc port enqueue meta," + if (ports_cfg == NULL) { + RTE_EDEV_LOG_ERR("failed to realloc port cfg mem," " nb_ports %u", nb_ports); return -(ENOMEM); } @@ -298,7 +347,7 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports) sizeof(dev->data->links_map[0]) * nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV, RTE_CACHE_LINE_SIZE); - if (dev->data->links_map == NULL) { + if (links_map == NULL) { dev->data->nb_ports = 0; RTE_EDEV_LOG_ERR("failed to realloc mem for port_map," "nb_ports %u", nb_ports); @@ -314,18 +363,15 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports) memset(ports + old_nb_ports, 0, sizeof(ports[0]) * new_ps); - memset(ports_dequeue_depth + old_nb_ports, 0, - sizeof(ports_dequeue_depth[0]) * new_ps); - memset(ports_enqueue_depth + old_nb_ports, 0, - sizeof(ports_enqueue_depth[0]) * new_ps); + memset(ports_cfg + old_nb_ports, 0, + sizeof(ports_cfg[0]) * new_ps); for (i = old_links_map_end; i < links_map_end; i++) links_map[i] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID; } dev->data->ports = ports; - dev->data->ports_dequeue_depth = ports_dequeue_depth; - dev->data->ports_enqueue_depth = ports_enqueue_depth; + dev->data->ports_cfg = ports_cfg; dev->data->links_map = links_map; } else if (dev->data->ports != NULL && nb_ports == 0) { RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP); @@ -366,9 +412,10 @@ rte_event_dev_configure(uint8_t dev_id, /* Check dequeue_timeout_ns value is in limit */ if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) { - if (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns + if (dev_conf->dequeue_timeout_ns && + (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns || 
dev_conf->dequeue_timeout_ns > - info.max_dequeue_timeout_ns) { + info.max_dequeue_timeout_ns)) { RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d" " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d", dev_id, dev_conf->dequeue_timeout_ns, @@ -426,8 +473,9 @@ rte_event_dev_configure(uint8_t dev_id, dev_id); return -EINVAL; } - if (dev_conf->nb_event_port_dequeue_depth > - info.max_event_port_dequeue_depth) { + if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) && + (dev_conf->nb_event_port_dequeue_depth > + info.max_event_port_dequeue_depth)) { RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d", dev_id, dev_conf->nb_event_port_dequeue_depth, info.max_event_port_dequeue_depth); @@ -440,8 +488,9 @@ rte_event_dev_configure(uint8_t dev_id, dev_id); return -EINVAL; } - if (dev_conf->nb_event_port_enqueue_depth > - info.max_event_port_enqueue_depth) { + if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) && + (dev_conf->nb_event_port_enqueue_depth > + info.max_event_port_enqueue_depth)) { RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d", dev_id, dev_conf->nb_event_port_enqueue_depth, info.max_event_port_enqueue_depth); @@ -477,6 +526,7 @@ rte_event_dev_configure(uint8_t dev_id, } dev->data->event_dev_cap = info.event_dev_cap; + rte_eventdev_trace_configure(dev_id, dev_conf, diag); return diag; } @@ -516,13 +566,13 @@ rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id, static inline int is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf) { - if (queue_conf && ( + if (queue_conf && + !(queue_conf->event_queue_cfg & + RTE_EVENT_QUEUE_CFG_SINGLE_LINK) && ((queue_conf->event_queue_cfg & - RTE_EVENT_QUEUE_CFG_TYPE_MASK) - == RTE_EVENT_QUEUE_CFG_ALL_TYPES) || - ((queue_conf->event_queue_cfg & - RTE_EVENT_QUEUE_CFG_TYPE_MASK) - == RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY) + RTE_EVENT_QUEUE_CFG_ALL_TYPES) || + (queue_conf->schedule_type + == RTE_SCHED_TYPE_ATOMIC) )) return 1; else @@ -532,13 +582,13 @@ is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf) static inline int is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf) { - if (queue_conf && ( - ((queue_conf->event_queue_cfg & - RTE_EVENT_QUEUE_CFG_TYPE_MASK) - == RTE_EVENT_QUEUE_CFG_ALL_TYPES) || + if (queue_conf && + !(queue_conf->event_queue_cfg & + RTE_EVENT_QUEUE_CFG_SINGLE_LINK) && ((queue_conf->event_queue_cfg & - RTE_EVENT_QUEUE_CFG_TYPE_MASK) - == RTE_EVENT_QUEUE_CFG_ORDERED_ONLY) + RTE_EVENT_QUEUE_CFG_ALL_TYPES) || + (queue_conf->schedule_type + == RTE_SCHED_TYPE_ORDERED) )) return 1; else @@ -602,31 +652,11 @@ rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id, queue_conf = &def_conf; } - dev->data->queues_prio[queue_id] = queue_conf->priority; + dev->data->queues_cfg[queue_id] = *queue_conf; + rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf); return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf); } -uint8_t -rte_event_queue_count(uint8_t dev_id) -{ - struct rte_eventdev *dev; - - dev = &rte_eventdevs[dev_id]; - return dev->data->nb_queues; -} - -uint8_t -rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id) -{ - struct rte_eventdev *dev; - - dev = &rte_eventdevs[dev_id]; - if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS) - return dev->data->queues_prio[queue_id]; - else - return RTE_EVENT_DEV_PRIORITY_NORMAL; -} - static inline int is_valid_port(struct rte_eventdev *dev, uint8_t port_id) { @@ -708,6 +738,15 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id, return -EINVAL; } + if 
(port_conf && port_conf->disable_implicit_release && + !(dev->data->event_dev_cap & + RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) { + RTE_EDEV_LOG_ERR( + "dev%d port%d Implicit release disable not supported", + dev_id, port_id); + return -EINVAL; + } + if (dev->data->dev_started) { RTE_EDEV_LOG_ERR( "device %d must be stopped to allow port setup", dev_id); @@ -723,10 +762,7 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id, port_conf = &def_conf; } - dev->data->ports_dequeue_depth[port_id] = - port_conf->dequeue_depth; - dev->data->ports_enqueue_depth[port_id] = - port_conf->enqueue_depth; + dev->data->ports_cfg[port_id] = *port_conf; diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf); @@ -734,37 +770,117 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id, if (!diag) diag = rte_event_port_unlink(dev_id, port_id, NULL, 0); + rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag); if (diag < 0) return diag; return 0; } -uint8_t -rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id) +int +rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id, + uint32_t *attr_value) { struct rte_eventdev *dev; + if (!attr_value) + return -EINVAL; + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); dev = &rte_eventdevs[dev_id]; - return dev->data->ports_dequeue_depth[port_id]; + + switch (attr_id) { + case RTE_EVENT_DEV_ATTR_PORT_COUNT: + *attr_value = dev->data->nb_ports; + break; + case RTE_EVENT_DEV_ATTR_QUEUE_COUNT: + *attr_value = dev->data->nb_queues; + break; + case RTE_EVENT_DEV_ATTR_STARTED: + *attr_value = dev->data->dev_started; + break; + default: + return -EINVAL; + } + + return 0; } -uint8_t -rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id) +int +rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id, + uint32_t *attr_value) { struct rte_eventdev *dev; + if (!attr_value) + return -EINVAL; + + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); dev = &rte_eventdevs[dev_id]; - return dev->data->ports_enqueue_depth[port_id]; + if (!is_valid_port(dev, port_id)) { + RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); + return -EINVAL; + } + + switch (attr_id) { + case RTE_EVENT_PORT_ATTR_ENQ_DEPTH: + *attr_value = dev->data->ports_cfg[port_id].enqueue_depth; + break; + case RTE_EVENT_PORT_ATTR_DEQ_DEPTH: + *attr_value = dev->data->ports_cfg[port_id].dequeue_depth; + break; + case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD: + *attr_value = dev->data->ports_cfg[port_id].new_event_threshold; + break; + default: + return -EINVAL; + }; + return 0; } -uint8_t -rte_event_port_count(uint8_t dev_id) +int +rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id, + uint32_t *attr_value) { + struct rte_event_queue_conf *conf; struct rte_eventdev *dev; + if (!attr_value) + return -EINVAL; + + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); dev = &rte_eventdevs[dev_id]; - return dev->data->nb_ports; + if (!is_valid_queue(dev, queue_id)) { + RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id); + return -EINVAL; + } + + conf = &dev->data->queues_cfg[queue_id]; + + switch (attr_id) { + case RTE_EVENT_QUEUE_ATTR_PRIORITY: + *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL; + if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS) + *attr_value = conf->priority; + break; + case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS: + *attr_value = conf->nb_atomic_flows; + break; + case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES: + *attr_value = conf->nb_atomic_order_sequences; + break; + case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG: 
+ *attr_value = conf->event_queue_cfg; + break; + case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE: + if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) + return -EOVERFLOW; + + *attr_value = conf->schedule_type; + break; + default: + return -EINVAL; + }; + return 0; } int @@ -778,13 +894,19 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id, uint16_t *links_map; int i, diag; - RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0); dev = &rte_eventdevs[dev_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_link, -ENOTSUP); + + if (*dev->dev_ops->port_link == NULL) { + RTE_EDEV_LOG_ERR("Function not supported\n"); + rte_errno = ENOTSUP; + return 0; + } if (!is_valid_port(dev, port_id)) { RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); - return -EINVAL; + rte_errno = EINVAL; + return 0; } if (queues == NULL) { @@ -803,8 +925,10 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id, } for (i = 0; i < nb_links; i++) - if (queues[i] >= dev->data->nb_queues) - return -EINVAL; + if (queues[i] >= dev->data->nb_queues) { + rte_errno = EINVAL; + return 0; + } diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id], queues, priorities, nb_links); @@ -817,6 +941,7 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id, for (i = 0; i < diag; i++) links_map[queues[i]] = (uint8_t)priorities[i]; + rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag); return diag; } @@ -826,28 +951,52 @@ rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, { struct rte_eventdev *dev; uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV]; - int i, diag; + int i, diag, j; uint16_t *links_map; - RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0); dev = &rte_eventdevs[dev_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlink, -ENOTSUP); + + if (*dev->dev_ops->port_unlink == NULL) { + RTE_EDEV_LOG_ERR("Function not supported"); + rte_errno = ENOTSUP; + return 0; + } if (!is_valid_port(dev, port_id)) { RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); - return -EINVAL; + rte_errno = EINVAL; + return 0; } + links_map = dev->data->links_map; + /* Point links_map to this port specific area */ + links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV); + if (queues == NULL) { - for (i = 0; i < dev->data->nb_queues; i++) - all_queues[i] = i; + j = 0; + for (i = 0; i < dev->data->nb_queues; i++) { + if (links_map[i] != + EVENT_QUEUE_SERVICE_PRIORITY_INVALID) { + all_queues[j] = i; + j++; + } + } queues = all_queues; - nb_unlinks = dev->data->nb_queues; + } else { + for (j = 0; j < nb_unlinks; j++) { + if (links_map[queues[j]] == + EVENT_QUEUE_SERVICE_PRIORITY_INVALID) + break; + } } + nb_unlinks = j; for (i = 0; i < nb_unlinks; i++) - if (queues[i] >= dev->data->nb_queues) - return -EINVAL; + if (queues[i] >= dev->data->nb_queues) { + rte_errno = EINVAL; + return 0; + } diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id], queues, nb_unlinks); @@ -855,15 +1004,35 @@ rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, if (diag < 0) return diag; - links_map = dev->data->links_map; - /* Point links_map to this port specific area */ - links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV); for (i = 0; i < diag; i++) links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID; + rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag); return diag; } +int +rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id) +{ + struct rte_eventdev *dev; + + 
RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + dev = &rte_eventdevs[dev_id]; + if (!is_valid_port(dev, port_id)) { + RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); + return -EINVAL; + } + + /* Return 0 if the PMD does not implement unlinks in progress. + * This allows PMDs which handle unlink synchronously to not implement + * this function at all. + */ + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0); + + return (*dev->dev_ops->port_unlinks_in_progress)(dev, + dev->data->ports[port_id]); +} + int rte_event_port_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[], uint8_t priorities[]) @@ -908,6 +1077,23 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns, return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks); } +int +rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id) +{ + struct rte_eventdev *dev; + + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + dev = &rte_eventdevs[dev_id]; + + if (service_id == NULL) + return -EINVAL; + + if (dev->data->service_inited) + *service_id = dev->data->service_id; + + return dev->data->service_inited ? 0 : -ESRCH; +} + int rte_event_dev_dump(uint8_t dev_id, FILE *f) { @@ -1005,6 +1191,16 @@ int rte_event_dev_xstats_reset(uint8_t dev_id, return -ENOTSUP; } +int rte_event_dev_selftest(uint8_t dev_id) +{ + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + struct rte_eventdev *dev = &rte_eventdevs[dev_id]; + + if (dev->dev_ops->dev_selftest != NULL) + return (*dev->dev_ops->dev_selftest)(); + return -ENOTSUP; +} + int rte_event_dev_start(uint8_t dev_id) { @@ -1024,6 +1220,7 @@ rte_event_dev_start(uint8_t dev_id) } diag = (*dev->dev_ops->dev_start)(dev); + rte_eventdev_trace_start(dev_id, diag); if (diag == 0) dev->data->dev_started = 1; else @@ -1032,6 +1229,23 @@ rte_event_dev_start(uint8_t dev_id) return 0; } +int +rte_event_dev_stop_flush_callback_register(uint8_t dev_id, + eventdev_stop_flush_t callback, void *userdata) +{ + struct rte_eventdev *dev; + + RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id); + + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + dev = &rte_eventdevs[dev_id]; + + dev->dev_ops->dev_stop_flush = callback; + dev->data->dev_stop_flush_arg = userdata; + + return 0; +} + void rte_event_dev_stop(uint8_t dev_id) { @@ -1051,6 +1265,7 @@ rte_event_dev_stop(uint8_t dev_id) dev->data->dev_started = 0; (*dev->dev_ops->dev_stop)(dev); + rte_eventdev_trace_stop(dev_id); } int @@ -1069,6 +1284,7 @@ rte_event_dev_close(uint8_t dev_id) return -EBUSY; } + rte_eventdev_trace_close(dev_id); return (*dev->dev_ops->dev_close)(dev); } @@ -1115,6 +1331,15 @@ rte_eventdev_find_free_device_index(void) return RTE_EVENT_MAX_DEVS; } +static uint16_t +rte_event_tx_adapter_enqueue(__rte_unused void *port, + __rte_unused struct rte_event ev[], + __rte_unused uint16_t nb_events) +{ + rte_errno = ENOTSUP; + return 0; +} + struct rte_eventdev * rte_event_pmd_allocate(const char *name, int socket_id) { @@ -1135,6 +1360,9 @@ rte_event_pmd_allocate(const char *name, int socket_id) eventdev = &rte_eventdevs[dev_id]; + eventdev->txa_enqueue = rte_event_tx_adapter_enqueue; + eventdev->txa_enqueue_same_dest = rte_event_tx_adapter_enqueue; + if (eventdev->data == NULL) { struct rte_eventdev_data *eventdev_data = NULL; @@ -1146,15 +1374,17 @@ rte_event_pmd_allocate(const char *name, int socket_id) eventdev->data = eventdev_data; - snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN, - "%s", name); + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - 
eventdev->data->dev_id = dev_id; - eventdev->data->socket_id = socket_id; - eventdev->data->dev_started = 0; + strlcpy(eventdev->data->name, name, + RTE_EVENTDEV_NAME_MAX_LEN); - eventdev->attached = RTE_EVENTDEV_ATTACHED; + eventdev->data->dev_id = dev_id; + eventdev->data->socket_id = socket_id; + eventdev->data->dev_started = 0; + } + eventdev->attached = RTE_EVENTDEV_ATTACHED; eventdev_globals.nb_devs++; } @@ -1171,10 +1401,6 @@ rte_event_pmd_release(struct rte_eventdev *eventdev) if (eventdev == NULL) return -EINVAL; - ret = rte_event_dev_close(eventdev->data->dev_id); - if (ret < 0) - return ret; - eventdev->attached = RTE_EVENTDEV_DETACHED; eventdev_globals.nb_devs--; @@ -1200,136 +1426,305 @@ rte_event_pmd_release(struct rte_eventdev *eventdev) return 0; } -struct rte_eventdev * -rte_event_pmd_vdev_init(const char *name, size_t dev_private_size, - int socket_id) + +static int +handle_dev_list(const char *cmd __rte_unused, + const char *params __rte_unused, + struct rte_tel_data *d) { - struct rte_eventdev *eventdev; + uint8_t dev_id; + int ndev = rte_event_dev_count(); - /* Allocate device structure */ - eventdev = rte_event_pmd_allocate(name, socket_id); - if (eventdev == NULL) - return NULL; + if (ndev < 1) + return -1; - /* Allocate private device structure */ - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - eventdev->data->dev_private = - rte_zmalloc_socket("eventdev device private", - dev_private_size, - RTE_CACHE_LINE_SIZE, - socket_id); - - if (eventdev->data->dev_private == NULL) - rte_panic("Cannot allocate memzone for private device" - " data"); + rte_tel_data_start_array(d, RTE_TEL_INT_VAL); + for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) { + if (rte_eventdevs[dev_id].attached == + RTE_EVENTDEV_ATTACHED) + rte_tel_data_add_array_int(d, dev_id); } - return eventdev; + return 0; } -int -rte_event_pmd_vdev_uninit(const char *name) +static int +handle_port_list(const char *cmd __rte_unused, + const char *params, + struct rte_tel_data *d) { - struct rte_eventdev *eventdev; + int i; + uint8_t dev_id; + struct rte_eventdev *dev; + char *end_param; - if (name == NULL) - return -EINVAL; + if (params == NULL || strlen(params) == 0 || !isdigit(*params)) + return -1; - eventdev = rte_event_pmd_get_named_dev(name); - if (eventdev == NULL) - return -ENODEV; + dev_id = strtoul(params, &end_param, 10); + if (*end_param != '\0') + RTE_EDEV_LOG_DEBUG( + "Extra parameters passed to eventdev telemetry command, ignoring"); - /* Free the event device */ - rte_event_pmd_release(eventdev); + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + dev = &rte_eventdevs[dev_id]; + + rte_tel_data_start_array(d, RTE_TEL_INT_VAL); + for (i = 0; i < dev->data->nb_ports; i++) + rte_tel_data_add_array_int(d, i); return 0; } -int -rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv, - struct rte_pci_device *pci_dev, - size_t private_data_size, - eventdev_pmd_pci_callback_t devinit) +static int +handle_queue_list(const char *cmd __rte_unused, + const char *params, + struct rte_tel_data *d) { - struct rte_eventdev *eventdev; + int i; + uint8_t dev_id; + struct rte_eventdev *dev; + char *end_param; - char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN]; + if (params == NULL || strlen(params) == 0 || !isdigit(*params)) + return -1; - int retval; + dev_id = strtoul(params, &end_param, 10); + if (*end_param != '\0') + RTE_EDEV_LOG_DEBUG( + "Extra parameters passed to eventdev telemetry command, ignoring"); - if (devinit == NULL) - return -EINVAL; + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 
-EINVAL); + dev = &rte_eventdevs[dev_id]; - rte_pci_device_name(&pci_dev->addr, eventdev_name, - sizeof(eventdev_name)); + rte_tel_data_start_array(d, RTE_TEL_INT_VAL); + for (i = 0; i < dev->data->nb_queues; i++) + rte_tel_data_add_array_int(d, i); - eventdev = rte_event_pmd_allocate(eventdev_name, - pci_dev->device.numa_node); - if (eventdev == NULL) - return -ENOMEM; + return 0; +} - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - eventdev->data->dev_private = - rte_zmalloc_socket( - "eventdev private structure", - private_data_size, - RTE_CACHE_LINE_SIZE, - rte_socket_id()); - - if (eventdev->data->dev_private == NULL) - rte_panic("Cannot allocate memzone for private " - "device data"); +static int +handle_queue_links(const char *cmd __rte_unused, + const char *params, + struct rte_tel_data *d) +{ + int i, ret, port_id = 0; + char *end_param; + uint8_t dev_id; + uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV]; + uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV]; + const char *p_param; + + if (params == NULL || strlen(params) == 0 || !isdigit(*params)) + return -1; + + /* Get dev ID from parameter string */ + dev_id = strtoul(params, &end_param, 10); + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + + p_param = strtok(end_param, ","); + if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param)) + return -1; + + port_id = strtoul(p_param, &end_param, 10); + p_param = strtok(NULL, "\0"); + if (p_param != NULL) + RTE_EDEV_LOG_DEBUG( + "Extra parameters passed to eventdev telemetry command, ignoring"); + + ret = rte_event_port_links_get(dev_id, port_id, queues, priorities); + if (ret < 0) + return -1; + + rte_tel_data_start_dict(d); + for (i = 0; i < ret; i++) { + char qid_name[32]; + + snprintf(qid_name, 31, "qid_%u", queues[i]); + rte_tel_data_add_dict_u64(d, qid_name, priorities[i]); } - eventdev->dev = &pci_dev->device; + return 0; +} - /* Invoke PMD device initialization function */ - retval = devinit(eventdev); - if (retval == 0) - return 0; +static int +eventdev_build_telemetry_data(int dev_id, + enum rte_event_dev_xstats_mode mode, + int port_queue_id, + struct rte_tel_data *d) +{ + struct rte_event_dev_xstats_name *xstat_names; + unsigned int *ids; + uint64_t *values; + int i, ret, num_xstats; + + num_xstats = rte_event_dev_xstats_names_get(dev_id, + mode, + port_queue_id, + NULL, + NULL, + 0); + + if (num_xstats < 0) + return -1; + + /* use one malloc for names */ + xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name)) + * num_xstats); + if (xstat_names == NULL) + return -1; + + ids = malloc((sizeof(unsigned int)) * num_xstats); + if (ids == NULL) { + free(xstat_names); + return -1; + } - RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)" - " failed", pci_drv->driver.name, - (unsigned int) pci_dev->id.vendor_id, - (unsigned int) pci_dev->id.device_id); + values = malloc((sizeof(uint64_t)) * num_xstats); + if (values == NULL) { + free(xstat_names); + free(ids); + return -1; + } - if (rte_eal_process_type() == RTE_PROC_PRIMARY) - rte_free(eventdev->data->dev_private); + ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id, + xstat_names, ids, num_xstats); + if (ret < 0 || ret > num_xstats) { + free(xstat_names); + free(ids); + free(values); + return -1; + } - eventdev->attached = RTE_EVENTDEV_DETACHED; - eventdev_globals.nb_devs--; + ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id, + ids, values, num_xstats); + if (ret < 0 || ret > num_xstats) { + free(xstat_names); + free(ids); + free(values); + return -1; + } - return 
-ENXIO; + rte_tel_data_start_dict(d); + for (i = 0; i < num_xstats; i++) + rte_tel_data_add_dict_u64(d, xstat_names[i].name, + values[i]); + + free(xstat_names); + free(ids); + free(values); + return 0; } -int -rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev, - eventdev_pmd_pci_callback_t devuninit) +static int +handle_dev_xstats(const char *cmd __rte_unused, + const char *params, + struct rte_tel_data *d) { - struct rte_eventdev *eventdev; - char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN]; - int ret = 0; + int dev_id; + enum rte_event_dev_xstats_mode mode; + char *end_param; - if (pci_dev == NULL) - return -EINVAL; + if (params == NULL || strlen(params) == 0 || !isdigit(*params)) + return -1; - rte_pci_device_name(&pci_dev->addr, eventdev_name, - sizeof(eventdev_name)); + /* Get dev ID from parameter string */ + dev_id = strtoul(params, &end_param, 10); + if (*end_param != '\0') + RTE_EDEV_LOG_DEBUG( + "Extra parameters passed to eventdev telemetry command, ignoring"); - eventdev = rte_event_pmd_get_named_dev(eventdev_name); - if (eventdev == NULL) - return -ENODEV; + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); - /* Invoke PMD device un-init function */ - if (devuninit) - ret = devuninit(eventdev); - if (ret) - return ret; + mode = RTE_EVENT_DEV_XSTATS_DEVICE; + return eventdev_build_telemetry_data(dev_id, mode, 0, d); +} - /* Free event device */ - rte_event_pmd_release(eventdev); +static int +handle_port_xstats(const char *cmd __rte_unused, + const char *params, + struct rte_tel_data *d) +{ + int dev_id; + int port_queue_id = 0; + enum rte_event_dev_xstats_mode mode; + char *end_param; + const char *p_param; - eventdev->dev = NULL; + if (params == NULL || strlen(params) == 0 || !isdigit(*params)) + return -1; - return 0; + /* Get dev ID from parameter string */ + dev_id = strtoul(params, &end_param, 10); + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + + p_param = strtok(end_param, ","); + mode = RTE_EVENT_DEV_XSTATS_PORT; + + if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param)) + return -1; + + port_queue_id = strtoul(p_param, &end_param, 10); + + p_param = strtok(NULL, "\0"); + if (p_param != NULL) + RTE_EDEV_LOG_DEBUG( + "Extra parameters passed to eventdev telemetry command, ignoring"); + + return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d); +} + +static int +handle_queue_xstats(const char *cmd __rte_unused, + const char *params, + struct rte_tel_data *d) +{ + int dev_id; + int port_queue_id = 0; + enum rte_event_dev_xstats_mode mode; + char *end_param; + const char *p_param; + + if (params == NULL || strlen(params) == 0 || !isdigit(*params)) + return -1; + + /* Get dev ID from parameter string */ + dev_id = strtoul(params, &end_param, 10); + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + + p_param = strtok(end_param, ","); + mode = RTE_EVENT_DEV_XSTATS_QUEUE; + + if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param)) + return -1; + + port_queue_id = strtoul(p_param, &end_param, 10); + + p_param = strtok(NULL, "\0"); + if (p_param != NULL) + RTE_EDEV_LOG_DEBUG( + "Extra parameters passed to eventdev telemetry command, ignoring"); + + return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d); +} + +RTE_INIT(eventdev_init_telemetry) +{ + rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list, + "Returns list of available eventdevs. Takes no parameters"); + rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list, + "Returns list of available ports. 
Parameter: DevID"); + rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list, + "Returns list of available queues. Parameter: DevID"); + + rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats, + "Returns stats for an eventdev. Parameter: DevID"); + rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats, + "Returns stats for an eventdev port. Params: DevID,PortID"); + rte_telemetry_register_cmd("/eventdev/queue_xstats", + handle_queue_xstats, + "Returns stats for an eventdev queue. Params: DevID,QueueID"); + rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links, + "Returns links for an eventdev port. Params: DevID,QueueID"); }