/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <ethdev_driver.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_telemetry.h>

#include "rte_eventdev.h"
#include "eventdev_pmd.h"
#include "rte_eventdev_trace.h"
static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = rte_event_devices;

static struct rte_eventdev_global eventdev_globals = {
    .nb_devs = 0
};
/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
    return eventdev_globals.nb_devs;
}
int
rte_event_dev_get_dev_id(const char *name)
{
    int i;
    uint8_t cmp;

    if (!name)
        return -EINVAL;

    for (i = 0; i < eventdev_globals.nb_devs; i++) {
        cmp = (strncmp(rte_event_devices[i].data->name, name,
                RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
            (rte_event_devices[i].dev ? (strncmp(
                rte_event_devices[i].dev->driver->name, name,
                RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
        if (cmp && (rte_event_devices[i].attached ==
                RTE_EVENTDEV_ATTACHED))
            return i;
    }
    return -ENODEV;
}
int
rte_event_dev_socket_id(uint8_t dev_id)
{
    struct rte_eventdev *dev;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];

    return dev->data->socket_id;
}
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
    struct rte_eventdev *dev;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];

    if (dev_info == NULL)
        return -EINVAL;

    memset(dev_info, 0, sizeof(struct rte_event_dev_info));

    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
    (*dev->dev_ops->dev_infos_get)(dev, dev_info);

    dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

    dev_info->dev = dev->dev;
    return 0;
}
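/*
 * Usage sketch for rte_event_dev_info_get() above (illustrative comment, not
 * part of the library): query the device limits before building a
 * configuration. "dev_id" is assumed to identify an attached eventdev.
 *
 *    struct rte_event_dev_info info;
 *    int ret = rte_event_dev_info_get(dev_id, &info);
 *    if (ret < 0)
 *        return ret;
 *    printf("max queues %u, max ports %u\n",
 *           info.max_event_queues, info.max_event_ports);
 */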
int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
                uint32_t *caps)
{
    struct rte_eventdev *dev;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

    dev = &rte_eventdevs[dev_id];

    if (caps == NULL)
        return -EINVAL;

    if (dev->dev_ops->eth_rx_adapter_caps_get == NULL)
        *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
    else
        *caps = 0;

    return dev->dev_ops->eth_rx_adapter_caps_get ?
            (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
                    &rte_eth_devices[eth_port_id],
                    caps)
            : 0;
}
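/*
 * Usage sketch for the Rx adapter capability query above (illustrative):
 * check whether the ethdev can inject events through an internal port, in
 * which case no service core is needed. "dev_id" and "eth_port_id" are
 * assumed to be valid identifiers supplied by the application.
 *
 *    uint32_t caps = 0;
 *
 *    if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps) == 0 &&
 *        (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
 *        use_internal_port = 1;
 */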
int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
{
    struct rte_eventdev *dev;
    const struct rte_event_timer_adapter_ops *ops;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

    dev = &rte_eventdevs[dev_id];

    if (caps == NULL)
        return -EINVAL;
    *caps = 0;

    return dev->dev_ops->timer_adapter_caps_get ?
            (*dev->dev_ops->timer_adapter_caps_get)(dev,
                    0,
                    caps,
                    &ops)
            : 0;
}
int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
                uint32_t *caps)
{
    struct rte_eventdev *dev;
    struct rte_cryptodev *cdev;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    if (!rte_cryptodev_is_valid_dev(cdev_id))
        return -EINVAL;

    dev = &rte_eventdevs[dev_id];
    cdev = rte_cryptodev_pmd_get_dev(cdev_id);

    if (caps == NULL)
        return -EINVAL;
    *caps = 0;

    return dev->dev_ops->crypto_adapter_caps_get ?
        (*dev->dev_ops->crypto_adapter_caps_get)
        (dev, cdev, caps) : -ENOTSUP;
}
int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
                uint32_t *caps)
{
    struct rte_eventdev *dev;
    struct rte_eth_dev *eth_dev;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

    dev = &rte_eventdevs[dev_id];
    eth_dev = &rte_eth_devices[eth_port_id];

    if (caps == NULL)
        return -EINVAL;

    if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
        *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
    else
        *caps = 0;

    return dev->dev_ops->eth_tx_adapter_caps_get ?
            (*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
                    eth_dev,
                    caps)
            : 0;
}
static inline int
event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
    uint8_t old_nb_queues = dev->data->nb_queues;
    struct rte_event_queue_conf *queues_cfg;
    unsigned int i;

    RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
             dev->data->dev_id);

    if (nb_queues != 0) {
        queues_cfg = dev->data->queues_cfg;
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

        for (i = nb_queues; i < old_nb_queues; i++)
            (*dev->dev_ops->queue_release)(dev, i);

        if (nb_queues > old_nb_queues) {
            uint8_t new_qs = nb_queues - old_nb_queues;

            memset(queues_cfg + old_nb_queues, 0,
                sizeof(queues_cfg[0]) * new_qs);
        }
    } else {
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

        for (i = nb_queues; i < old_nb_queues; i++)
            (*dev->dev_ops->queue_release)(dev, i);
    }

    dev->data->nb_queues = nb_queues;
    return 0;
}
#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
static inline int
event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
    uint8_t old_nb_ports = dev->data->nb_ports;
    void **ports;
    uint16_t *links_map;
    struct rte_event_port_conf *ports_cfg;
    unsigned int i;

    RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
             dev->data->dev_id);

    if (nb_ports != 0) { /* re-config */
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

        ports = dev->data->ports;
        ports_cfg = dev->data->ports_cfg;
        links_map = dev->data->links_map;

        for (i = nb_ports; i < old_nb_ports; i++)
            (*dev->dev_ops->port_release)(ports[i]);

        if (nb_ports > old_nb_ports) {
            uint8_t new_ps = nb_ports - old_nb_ports;
            unsigned int old_links_map_end =
                old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
            unsigned int links_map_end =
                nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

            memset(ports + old_nb_ports, 0,
                sizeof(ports[0]) * new_ps);
            memset(ports_cfg + old_nb_ports, 0,
                sizeof(ports_cfg[0]) * new_ps);
            for (i = old_links_map_end; i < links_map_end; i++)
                links_map[i] =
                    EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
        }
    } else {
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

        ports = dev->data->ports;
        for (i = nb_ports; i < old_nb_ports; i++) {
            (*dev->dev_ops->port_release)(ports[i]);
            ports[i] = NULL;
        }
    }

    dev->data->nb_ports = nb_ports;
    return 0;
}
int
rte_event_dev_configure(uint8_t dev_id,
            const struct rte_event_dev_config *dev_conf)
{
    struct rte_eventdev *dev;
    struct rte_event_dev_info info;
    int diag;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];

    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

    if (dev->data->dev_started) {
        RTE_EDEV_LOG_ERR(
            "device %d must be stopped to allow configuration", dev_id);
        return -EBUSY;
    }

    if (dev_conf == NULL)
        return -EINVAL;

    (*dev->dev_ops->dev_infos_get)(dev, &info);

    /* Check dequeue_timeout_ns value is in limit */
    if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
        if (dev_conf->dequeue_timeout_ns &&
            (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
                || dev_conf->dequeue_timeout_ns >
                    info.max_dequeue_timeout_ns)) {
            RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
            " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
            dev_id, dev_conf->dequeue_timeout_ns,
            info.min_dequeue_timeout_ns,
            info.max_dequeue_timeout_ns);
            return -EINVAL;
        }
    }

    /* Check nb_events_limit is in limit */
    if (dev_conf->nb_events_limit > info.max_num_events) {
        RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
        dev_id, dev_conf->nb_events_limit, info.max_num_events);
        return -EINVAL;
    }

    /* Check nb_event_queues is in limit */
    if (!dev_conf->nb_event_queues) {
        RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
                    dev_id);
        return -EINVAL;
    }
    if (dev_conf->nb_event_queues > info.max_event_queues +
            info.max_single_link_event_port_queue_pairs) {
        RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
                 dev_id, dev_conf->nb_event_queues,
                 info.max_event_queues,
                 info.max_single_link_event_port_queue_pairs);
        return -EINVAL;
    }
    if (dev_conf->nb_event_queues -
            dev_conf->nb_single_link_event_port_queues >
            info.max_event_queues) {
        RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
                 dev_id, dev_conf->nb_event_queues,
                 dev_conf->nb_single_link_event_port_queues,
                 info.max_event_queues);
        return -EINVAL;
    }
    if (dev_conf->nb_single_link_event_port_queues >
            dev_conf->nb_event_queues) {
        RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
                 dev_id,
                 dev_conf->nb_single_link_event_port_queues,
                 dev_conf->nb_event_queues);
        return -EINVAL;
    }

    /* Check nb_event_ports is in limit */
    if (!dev_conf->nb_event_ports) {
        RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
        return -EINVAL;
    }
    if (dev_conf->nb_event_ports > info.max_event_ports +
            info.max_single_link_event_port_queue_pairs) {
        RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
                 dev_id, dev_conf->nb_event_ports,
                 info.max_event_ports,
                 info.max_single_link_event_port_queue_pairs);
        return -EINVAL;
    }
    if (dev_conf->nb_event_ports -
            dev_conf->nb_single_link_event_port_queues
            > info.max_event_ports) {
        RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
                 dev_id, dev_conf->nb_event_ports,
                 dev_conf->nb_single_link_event_port_queues,
                 info.max_event_ports);
        return -EINVAL;
    }

    if (dev_conf->nb_single_link_event_port_queues >
            dev_conf->nb_event_ports) {
        RTE_EDEV_LOG_ERR(
            "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
                 dev_id,
                 dev_conf->nb_single_link_event_port_queues,
                 dev_conf->nb_event_ports);
        return -EINVAL;
    }

    /* Check nb_event_queue_flows is in limit */
    if (!dev_conf->nb_event_queue_flows) {
        RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
        return -EINVAL;
    }
    if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
        RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
        dev_id, dev_conf->nb_event_queue_flows,
        info.max_event_queue_flows);
        return -EINVAL;
    }

    /* Check nb_event_port_dequeue_depth is in limit */
    if (!dev_conf->nb_event_port_dequeue_depth) {
        RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
                    dev_id);
        return -EINVAL;
    }
    if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
         (dev_conf->nb_event_port_dequeue_depth >
             info.max_event_port_dequeue_depth)) {
        RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
        dev_id, dev_conf->nb_event_port_dequeue_depth,
        info.max_event_port_dequeue_depth);
        return -EINVAL;
    }

    /* Check nb_event_port_enqueue_depth is in limit */
    if (!dev_conf->nb_event_port_enqueue_depth) {
        RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
                    dev_id);
        return -EINVAL;
    }
    if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
        (dev_conf->nb_event_port_enqueue_depth >
             info.max_event_port_enqueue_depth)) {
        RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
        dev_id, dev_conf->nb_event_port_enqueue_depth,
        info.max_event_port_enqueue_depth);
        return -EINVAL;
    }

    /* Copy the dev_conf parameter into the dev structure */
    memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

    /* Setup new number of queues and reconfigure device. */
    diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
    if (diag != 0) {
        RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
                 diag);
        return diag;
    }

    /* Setup new number of ports and reconfigure device. */
    diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
    if (diag != 0) {
        event_dev_queue_config(dev, 0);
        RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
                 diag);
        return diag;
    }

    /* Configure the device */
    diag = (*dev->dev_ops->dev_configure)(dev);
    if (diag != 0) {
        RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
        event_dev_queue_config(dev, 0);
        event_dev_port_config(dev, 0);
    }

    dev->data->event_dev_cap = info.event_dev_cap;
    rte_eventdev_trace_configure(dev_id, dev_conf, diag);
    return diag;
}
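/*
 * Usage sketch for rte_event_dev_configure() above (illustrative): a minimal
 * single-queue, single-port configuration derived from the reported limits.
 * "dev_id" and "ret" come from the caller; the values shown are assumptions
 * an application would tune.
 *
 *    struct rte_event_dev_info info;
 *    struct rte_event_dev_config cfg = {0};
 *
 *    rte_event_dev_info_get(dev_id, &info);
 *    cfg.nb_event_queues = 1;
 *    cfg.nb_event_ports = 1;
 *    cfg.nb_events_limit = info.max_num_events;
 *    cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *    cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *    cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *    cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *    ret = rte_event_dev_configure(dev_id, &cfg);
 */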
static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
    if (queue_id < dev->data->nb_queues && queue_id <
                RTE_EVENT_MAX_QUEUES_PER_DEV)
        return 1;
    else
        return 0;
}
int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
                 struct rte_event_queue_conf *queue_conf)
{
    struct rte_eventdev *dev;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];

    if (queue_conf == NULL)
        return -EINVAL;

    if (!is_valid_queue(dev, queue_id)) {
        RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
        return -EINVAL;
    }

    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
    memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
    (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
    return 0;
}
static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
    if (queue_conf &&
        !(queue_conf->event_queue_cfg &
          RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
        ((queue_conf->event_queue_cfg &
             RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
        (queue_conf->schedule_type
            == RTE_SCHED_TYPE_ATOMIC)
        ))
        return 1;
    else
        return 0;
}
static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
    if (queue_conf &&
        !(queue_conf->event_queue_cfg &
          RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
        ((queue_conf->event_queue_cfg &
             RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
        (queue_conf->schedule_type
            == RTE_SCHED_TYPE_ORDERED)
        ))
        return 1;
    else
        return 0;
}
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
              const struct rte_event_queue_conf *queue_conf)
{
    struct rte_eventdev *dev;
    struct rte_event_queue_conf def_conf;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];

    if (!is_valid_queue(dev, queue_id)) {
        RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
        return -EINVAL;
    }

    /* Check nb_atomic_flows limit */
    if (is_valid_atomic_queue_conf(queue_conf)) {
        if (queue_conf->nb_atomic_flows == 0 ||
            queue_conf->nb_atomic_flows >
            dev->data->dev_conf.nb_event_queue_flows) {
            RTE_EDEV_LOG_ERR(
        "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
            dev_id, queue_id, queue_conf->nb_atomic_flows,
            dev->data->dev_conf.nb_event_queue_flows);
            return -EINVAL;
        }
    }

    /* Check nb_atomic_order_sequences limit */
    if (is_valid_ordered_queue_conf(queue_conf)) {
        if (queue_conf->nb_atomic_order_sequences == 0 ||
            queue_conf->nb_atomic_order_sequences >
            dev->data->dev_conf.nb_event_queue_flows) {
            RTE_EDEV_LOG_ERR(
        "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
            dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
            dev->data->dev_conf.nb_event_queue_flows);
            return -EINVAL;
        }
    }

    if (dev->data->dev_started) {
        RTE_EDEV_LOG_ERR(
            "device %d must be stopped to allow queue setup", dev_id);
        return -EBUSY;
    }

    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

    if (queue_conf == NULL) {
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
                    -ENOTSUP);
        (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
        queue_conf = &def_conf;
    }

    dev->data->queues_cfg[queue_id] = *queue_conf;
    rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
    return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
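/*
 * Usage sketch for rte_event_queue_setup() above (illustrative): set up
 * queue 0 as an atomic queue. The flow and sequence counts are assumptions
 * and must stay within the configured nb_event_queue_flows.
 *
 *    struct rte_event_queue_conf qconf = {
 *        .schedule_type = RTE_SCHED_TYPE_ATOMIC,
 *        .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *        .nb_atomic_flows = 1024,
 *        .nb_atomic_order_sequences = 1024,
 *    };
 *
 *    ret = rte_event_queue_setup(dev_id, 0, &qconf);
 */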
static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
    if (port_id < dev->data->nb_ports)
        return 1;
    else
        return 0;
}
int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
                struct rte_event_port_conf *port_conf)
{
    struct rte_eventdev *dev;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];

    if (port_conf == NULL)
        return -EINVAL;

    if (!is_valid_port(dev, port_id)) {
        RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
        return -EINVAL;
    }

    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
    memset(port_conf, 0, sizeof(struct rte_event_port_conf));
    (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
    return 0;
}
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
             const struct rte_event_port_conf *port_conf)
{
    struct rte_eventdev *dev;
    struct rte_event_port_conf def_conf;
    int diag;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];

    if (!is_valid_port(dev, port_id)) {
        RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
        return -EINVAL;
    }

    /* Check new_event_threshold limit */
    if ((port_conf && !port_conf->new_event_threshold) ||
            (port_conf && port_conf->new_event_threshold >
                dev->data->dev_conf.nb_events_limit)) {
        RTE_EDEV_LOG_ERR(
            "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
            dev_id, port_id, port_conf->new_event_threshold,
            dev->data->dev_conf.nb_events_limit);
        return -EINVAL;
    }

    /* Check dequeue_depth limit */
    if ((port_conf && !port_conf->dequeue_depth) ||
            (port_conf && port_conf->dequeue_depth >
                dev->data->dev_conf.nb_event_port_dequeue_depth)) {
        RTE_EDEV_LOG_ERR(
            "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
            dev_id, port_id, port_conf->dequeue_depth,
            dev->data->dev_conf.nb_event_port_dequeue_depth);
        return -EINVAL;
    }

    /* Check enqueue_depth limit */
    if ((port_conf && !port_conf->enqueue_depth) ||
            (port_conf && port_conf->enqueue_depth >
                dev->data->dev_conf.nb_event_port_enqueue_depth)) {
        RTE_EDEV_LOG_ERR(
            "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
            dev_id, port_id, port_conf->enqueue_depth,
            dev->data->dev_conf.nb_event_port_enqueue_depth);
        return -EINVAL;
    }

    if (port_conf &&
        (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
        !(dev->data->event_dev_cap &
          RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
        RTE_EDEV_LOG_ERR(
            "dev%d port%d Implicit release disable not supported",
            dev_id, port_id);
        return -EINVAL;
    }

    if (dev->data->dev_started) {
        RTE_EDEV_LOG_ERR(
            "device %d must be stopped to allow port setup", dev_id);
        return -EBUSY;
    }

    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

    if (port_conf == NULL) {
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
                    -ENOTSUP);
        (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
        port_conf = &def_conf;
    }

    dev->data->ports_cfg[port_id] = *port_conf;

    diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

    /* Unlink all the queues from this port (default state after setup) */
    if (!diag)
        diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

    rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
    if (diag < 0)
        return diag;

    return 0;
}
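/*
 * Usage sketch for rte_event_port_setup() above (illustrative): set up
 * port 0 with the driver defaults, which this function also applies
 * internally when port_conf is NULL.
 *
 *    struct rte_event_port_conf pconf;
 *
 *    ret = rte_event_port_default_conf_get(dev_id, 0, &pconf);
 *    if (ret == 0)
 *        ret = rte_event_port_setup(dev_id, 0, &pconf);
 */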
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
               uint32_t *attr_value)
{
    struct rte_eventdev *dev;

    if (!attr_value)
        return -EINVAL;
    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];

    switch (attr_id) {
    case RTE_EVENT_DEV_ATTR_PORT_COUNT:
        *attr_value = dev->data->nb_ports;
        break;
    case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
        *attr_value = dev->data->nb_queues;
        break;
    case RTE_EVENT_DEV_ATTR_STARTED:
        *attr_value = dev->data->dev_started;
        break;
    default:
        return -EINVAL;
    }

    return 0;
}
int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
            uint32_t *attr_value)
{
    struct rte_eventdev *dev;

    if (!attr_value)
        return -EINVAL;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];
    if (!is_valid_port(dev, port_id)) {
        RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
        return -EINVAL;
    }

    switch (attr_id) {
    case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
        *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
        break;
    case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
        *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
        break;
    case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
        *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
        break;
    case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
    {
        uint32_t config;

        config = dev->data->ports_cfg[port_id].event_port_cfg;
        *attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
        break;
    }
    default:
        return -EINVAL;
    }

    return 0;
}
int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
            uint32_t *attr_value)
{
    struct rte_event_queue_conf *conf;
    struct rte_eventdev *dev;

    if (!attr_value)
        return -EINVAL;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];
    if (!is_valid_queue(dev, queue_id)) {
        RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
        return -EINVAL;
    }

    conf = &dev->data->queues_cfg[queue_id];

    switch (attr_id) {
    case RTE_EVENT_QUEUE_ATTR_PRIORITY:
        *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
        if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
            *attr_value = conf->priority;
        break;
    case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
        *attr_value = conf->nb_atomic_flows;
        break;
    case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
        *attr_value = conf->nb_atomic_order_sequences;
        break;
    case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
        *attr_value = conf->event_queue_cfg;
        break;
    case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
        if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
            return -EOVERFLOW;

        *attr_value = conf->schedule_type;
        break;
    default:
        return -EINVAL;
    }

    return 0;
}
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
            const uint8_t queues[], const uint8_t priorities[],
            uint16_t nb_links)
{
    struct rte_eventdev *dev;
    uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
    uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
    uint16_t *links_map;
    int i, diag;

    RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
    dev = &rte_eventdevs[dev_id];

    if (*dev->dev_ops->port_link == NULL) {
        RTE_EDEV_LOG_ERR("Function not supported");
        rte_errno = ENOTSUP;
        return 0;
    }

    if (!is_valid_port(dev, port_id)) {
        RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
        rte_errno = EINVAL;
        return 0;
    }

    if (queues == NULL) {
        for (i = 0; i < dev->data->nb_queues; i++)
            queues_list[i] = i;

        queues = queues_list;
        nb_links = dev->data->nb_queues;
    }

    if (priorities == NULL) {
        for (i = 0; i < nb_links; i++)
            priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

        priorities = priorities_list;
    }

    for (i = 0; i < nb_links; i++)
        if (queues[i] >= dev->data->nb_queues) {
            rte_errno = EINVAL;
            return 0;
        }

    diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
                        queues, priorities, nb_links);
    if (diag < 0)
        return diag;

    links_map = dev->data->links_map;
    /* Point links_map to this port specific area */
    links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
    for (i = 0; i < diag; i++)
        links_map[queues[i]] = (uint8_t)priorities[i];

    rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
    return diag;
}
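/*
 * Usage sketch for rte_event_port_link() above (illustrative): link port 0
 * to queue 0 at normal priority. Passing NULL for queues and priorities
 * instead links every configured queue, as implemented above.
 *
 *    uint8_t q = 0;
 *    uint8_t prio = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *
 *    if (rte_event_port_link(dev_id, 0, &q, &prio, 1) != 1)
 *        return -rte_errno;
 */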
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
              uint8_t queues[], uint16_t nb_unlinks)
{
    struct rte_eventdev *dev;
    uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
    int i, diag, j;
    uint16_t *links_map;

    RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
    dev = &rte_eventdevs[dev_id];

    if (*dev->dev_ops->port_unlink == NULL) {
        RTE_EDEV_LOG_ERR("Function not supported");
        rte_errno = ENOTSUP;
        return 0;
    }

    if (!is_valid_port(dev, port_id)) {
        RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
        rte_errno = EINVAL;
        return 0;
    }

    links_map = dev->data->links_map;
    /* Point links_map to this port specific area */
    links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);

    if (queues == NULL) {
        j = 0;
        for (i = 0; i < dev->data->nb_queues; i++) {
            if (links_map[i] !=
                    EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
                all_queues[j] = i;
                j++;
            }
        }
        queues = all_queues;
    } else {
        for (j = 0; j < nb_unlinks; j++) {
            if (links_map[queues[j]] ==
                    EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
                break;
        }
    }

    nb_unlinks = j;
    for (i = 0; i < nb_unlinks; i++)
        if (queues[i] >= dev->data->nb_queues) {
            rte_errno = EINVAL;
            return 0;
        }

    diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
                    queues, nb_unlinks);

    if (diag < 0)
        return diag;

    for (i = 0; i < diag; i++)
        links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

    rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
    return diag;
}
int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
{
    struct rte_eventdev *dev;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];
    if (!is_valid_port(dev, port_id)) {
        RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
        return -EINVAL;
    }

    /* Return 0 if the PMD does not implement unlinks in progress.
     * This allows PMDs which handle unlink synchronously to not implement
     * this function at all.
     */
    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);

    return (*dev->dev_ops->port_unlinks_in_progress)(dev,
            dev->data->ports[port_id]);
}
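/*
 * Usage sketch for the two functions above (illustrative): request an
 * unlink of all queues, then busy-wait until the PMD reports that the
 * unlinks have completed. "dev_id" and "port_id" are assumed valid.
 *
 *    rte_event_port_unlink(dev_id, port_id, NULL, 0);
 *    while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *        rte_pause();
 */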
int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
             uint8_t queues[], uint8_t priorities[])
{
    struct rte_eventdev *dev;
    uint16_t *links_map;
    int i, count = 0;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];
    if (!is_valid_port(dev, port_id)) {
        RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
        return -EINVAL;
    }

    links_map = dev->data->links_map;
    /* Point links_map to this port specific area */
    links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
    for (i = 0; i < dev->data->nb_queues; i++) {
        if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
            queues[count] = i;
            priorities[count] = (uint8_t)links_map[i];
            ++count;
        }
    }
    return count;
}
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
                uint64_t *timeout_ticks)
{
    struct rte_eventdev *dev;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];
    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

    if (timeout_ticks == NULL)
        return -EINVAL;

    return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}
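/*
 * Usage sketch for rte_event_dequeue_timeout_ticks() above (illustrative):
 * convert a 100 us timeout to device ticks and use it as the dequeue
 * timeout. "port_id" and the "evs" array come from the caller.
 *
 *    uint64_t ticks = 0;
 *    uint16_t nb;
 *
 *    if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) == 0)
 *        nb = rte_event_dequeue_burst(dev_id, port_id, evs,
 *                         RTE_DIM(evs), ticks);
 */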
int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
{
    struct rte_eventdev *dev;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];

    if (service_id == NULL)
        return -EINVAL;

    if (dev->data->service_inited)
        *service_id = dev->data->service_id;

    return dev->data->service_inited ? 0 : -ESRCH;
}
int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
    struct rte_eventdev *dev;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];
    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
    if (f == NULL)
        return -EINVAL;

    (*dev->dev_ops->dump)(dev, f);
    return 0;
}
static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
        uint8_t queue_port_id)
{
    struct rte_eventdev *dev = &rte_eventdevs[dev_id];
    if (dev->dev_ops->xstats_get_names != NULL)
        return (*dev->dev_ops->xstats_get_names)(dev, mode,
                            queue_port_id,
                            NULL, NULL, 0);
    return 0;
}
int
rte_event_dev_xstats_names_get(uint8_t dev_id,
        enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
        struct rte_event_dev_xstats_name *xstats_names,
        unsigned int *ids, unsigned int size)
{
    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
    const int cnt_expected_entries = xstats_get_count(dev_id, mode,
                              queue_port_id);
    if (xstats_names == NULL || cnt_expected_entries < 0 ||
            (int)size < cnt_expected_entries)
        return cnt_expected_entries;

    /* dev_id checked above */
    const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

    if (dev->dev_ops->xstats_get_names != NULL)
        return (*dev->dev_ops->xstats_get_names)(dev, mode,
                queue_port_id, xstats_names, ids, size);

    return -ENOTSUP;
}
/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
        uint8_t queue_port_id, const unsigned int ids[],
        uint64_t values[], unsigned int n)
{
    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
    const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

    /* implemented by the driver */
    if (dev->dev_ops->xstats_get != NULL)
        return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
                ids, values, n);
    return -ENOTSUP;
}
uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
                 unsigned int *id)
{
    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
    const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
    unsigned int temp = -1;

    if (id != NULL)
        *id = (unsigned int)-1;
    else
        id = &temp; /* ensure driver never gets a NULL value */

    /* implemented by driver */
    if (dev->dev_ops->xstats_get_by_name != NULL)
        return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
    return -ENOTSUP;
}
int rte_event_dev_xstats_reset(uint8_t dev_id,
        enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
        const uint32_t ids[], uint32_t nb_ids)
{
    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    struct rte_eventdev *dev = &rte_eventdevs[dev_id];

    if (dev->dev_ops->xstats_reset != NULL)
        return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
                            ids, nb_ids);
    return -ENOTSUP;
}
int rte_event_pmd_selftest_seqn_dynfield_offset = -1;
int rte_event_dev_selftest(uint8_t dev_id)
{
    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
        .name = "rte_event_pmd_selftest_seqn_dynfield",
        .size = sizeof(rte_event_pmd_selftest_seqn_t),
        .align = __alignof__(rte_event_pmd_selftest_seqn_t),
    };
    struct rte_eventdev *dev = &rte_eventdevs[dev_id];

    if (dev->dev_ops->dev_selftest != NULL) {
        rte_event_pmd_selftest_seqn_dynfield_offset =
            rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
        if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
            return -ENOMEM;
        return (*dev->dev_ops->dev_selftest)();
    }
    return -ENOTSUP;
}
struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
                 unsigned int cache_size, uint16_t nb_elem,
                 int socket_id)
{
    const char *mp_ops_name;
    struct rte_mempool *mp;
    unsigned int elt_sz;
    int ret;

    if (!nb_elem) {
        RTE_LOG(ERR, EVENTDEV,
            "Invalid number of elements=%d requested\n", nb_elem);
        rte_errno = EINVAL;
        return NULL;
    }

    elt_sz =
        sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
    mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
                      0);
    if (mp == NULL)
        return NULL;

    mp_ops_name = rte_mbuf_best_mempool_ops();
    ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
    if (ret != 0) {
        RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
        goto err;
    }

    ret = rte_mempool_populate_default(mp);
    if (ret < 0)
        goto err;

    return mp;
err:
    rte_mempool_free(mp);
    rte_errno = -ret;
    return NULL;
}
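/*
 * Usage sketch for rte_event_vector_pool_create() above (illustrative):
 * create a pool of event vectors, each able to carry up to 32 object
 * pointers. The pool name and sizes are assumptions.
 *
 *    struct rte_mempool *vpool;
 *
 *    vpool = rte_event_vector_pool_create("evt_vec_pool", 16384, 128, 32,
 *                         rte_socket_id());
 *    if (vpool == NULL)
 *        return -rte_errno;
 */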
int
rte_event_dev_start(uint8_t dev_id)
{
    struct rte_eventdev *dev;
    int diag;

    RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];
    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

    if (dev->data->dev_started != 0) {
        RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
            dev_id);
        return 0;
    }

    diag = (*dev->dev_ops->dev_start)(dev);
    rte_eventdev_trace_start(dev_id, diag);
    if (diag == 0)
        dev->data->dev_started = 1;
    else
        return diag;

    return 0;
}
int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
        eventdev_stop_flush_t callback, void *userdata)
{
    struct rte_eventdev *dev;

    RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];

    dev->dev_ops->dev_stop_flush = callback;
    dev->data->dev_stop_flush_arg = userdata;

    return 0;
}
void
rte_event_dev_stop(uint8_t dev_id)
{
    struct rte_eventdev *dev;

    RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

    RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
    dev = &rte_eventdevs[dev_id];
    RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

    if (dev->data->dev_started == 0) {
        RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
            dev_id);
        return;
    }

    dev->data->dev_started = 0;
    (*dev->dev_ops->dev_stop)(dev);
    rte_eventdev_trace_stop(dev_id);
}
int
rte_event_dev_close(uint8_t dev_id)
{
    struct rte_eventdev *dev;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];
    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

    /* Device must be stopped before it can be closed */
    if (dev->data->dev_started == 1) {
        RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
                dev_id);
        return -EBUSY;
    }

    rte_eventdev_trace_close(dev_id);
    return (*dev->dev_ops->dev_close)(dev);
}
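/*
 * Usage sketch (illustrative): the teardown order enforced above is stop
 * first, then close.
 *
 *    rte_event_dev_stop(dev_id);
 *    ret = rte_event_dev_close(dev_id);
 */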
static inline int
eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
            int socket_id)
{
    char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
    const struct rte_memzone *mz;
    int n;

    /* Generate memzone name */
    n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
    if (n >= (int)sizeof(mz_name))
        return -EINVAL;

    if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
        mz = rte_memzone_reserve(mz_name,
                sizeof(struct rte_eventdev_data),
                socket_id, 0);
    } else
        mz = rte_memzone_lookup(mz_name);

    if (mz == NULL)
        return -ENOMEM;

    *data = mz->addr;
    if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
        memset(*data, 0, sizeof(struct rte_eventdev_data));
        for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
                    RTE_EVENT_MAX_QUEUES_PER_DEV;
             n++)
            (*data)->links_map[n] =
                EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
    }

    return 0;
}
static inline uint8_t
eventdev_find_free_device_index(void)
{
    uint8_t dev_id;

    for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
        if (rte_eventdevs[dev_id].attached ==
                RTE_EVENTDEV_DETACHED)
            return dev_id;
    }
    return RTE_EVENT_MAX_DEVS;
}
static uint16_t
rte_event_tx_adapter_enqueue(__rte_unused void *port,
            __rte_unused struct rte_event ev[],
            __rte_unused uint16_t nb_events)
{
    rte_errno = ENOTSUP;
    return 0;
}

static uint16_t
rte_event_crypto_adapter_enqueue(__rte_unused void *port,
            __rte_unused struct rte_event ev[],
            __rte_unused uint16_t nb_events)
{
    rte_errno = ENOTSUP;
    return 0;
}
struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
    struct rte_eventdev *eventdev;
    uint8_t dev_id;

    if (rte_event_pmd_get_named_dev(name) != NULL) {
        RTE_EDEV_LOG_ERR("Event device with name %s already "
                "allocated!", name);
        return NULL;
    }

    dev_id = eventdev_find_free_device_index();
    if (dev_id == RTE_EVENT_MAX_DEVS) {
        RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
        return NULL;
    }

    eventdev = &rte_eventdevs[dev_id];

    eventdev->txa_enqueue = rte_event_tx_adapter_enqueue;
    eventdev->txa_enqueue_same_dest = rte_event_tx_adapter_enqueue;
    eventdev->ca_enqueue = rte_event_crypto_adapter_enqueue;

    if (eventdev->data == NULL) {
        struct rte_eventdev_data *eventdev_data = NULL;

        int retval =
            eventdev_data_alloc(dev_id, &eventdev_data, socket_id);

        if (retval < 0 || eventdev_data == NULL)
            return NULL;

        eventdev->data = eventdev_data;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {

            strlcpy(eventdev->data->name, name,
                RTE_EVENTDEV_NAME_MAX_LEN);

            eventdev->data->dev_id = dev_id;
            eventdev->data->socket_id = socket_id;
            eventdev->data->dev_started = 0;
        }

        eventdev->attached = RTE_EVENTDEV_ATTACHED;
        eventdev_globals.nb_devs++;
    }

    return eventdev;
}
int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
    int ret;
    char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
    const struct rte_memzone *mz;

    if (eventdev == NULL)
        return -EINVAL;

    eventdev->attached = RTE_EVENTDEV_DETACHED;
    eventdev_globals.nb_devs--;

    if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
        rte_free(eventdev->data->dev_private);

        /* Generate memzone name */
        ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
                eventdev->data->dev_id);
        if (ret >= (int)sizeof(mz_name))
            return -EINVAL;

        mz = rte_memzone_lookup(mz_name);
        if (mz == NULL)
            return -ENOMEM;

        ret = rte_memzone_free(mz);
        if (ret)
            return ret;
    }

    eventdev->data = NULL;
    return 0;
}
static int
handle_dev_list(const char *cmd __rte_unused,
        const char *params __rte_unused,
        struct rte_tel_data *d)
{
    uint8_t dev_id;
    int ndev = rte_event_dev_count();

    if (ndev < 1)
        return -1;

    rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
    for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
        if (rte_eventdevs[dev_id].attached ==
                RTE_EVENTDEV_ATTACHED)
            rte_tel_data_add_array_int(d, dev_id);
    }

    return 0;
}
static int
handle_port_list(const char *cmd __rte_unused,
         const char *params,
         struct rte_tel_data *d)
{
    int i;
    uint8_t dev_id;
    struct rte_eventdev *dev;
    char *end_param;

    if (params == NULL || strlen(params) == 0 || !isdigit(*params))
        return -1;

    dev_id = strtoul(params, &end_param, 10);
    if (*end_param != '\0')
        RTE_EDEV_LOG_DEBUG(
            "Extra parameters passed to eventdev telemetry command, ignoring");

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];

    rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
    for (i = 0; i < dev->data->nb_ports; i++)
        rte_tel_data_add_array_int(d, i);

    return 0;
}
static int
handle_queue_list(const char *cmd __rte_unused,
          const char *params,
          struct rte_tel_data *d)
{
    int i;
    uint8_t dev_id;
    struct rte_eventdev *dev;
    char *end_param;

    if (params == NULL || strlen(params) == 0 || !isdigit(*params))
        return -1;

    dev_id = strtoul(params, &end_param, 10);
    if (*end_param != '\0')
        RTE_EDEV_LOG_DEBUG(
            "Extra parameters passed to eventdev telemetry command, ignoring");

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];

    rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
    for (i = 0; i < dev->data->nb_queues; i++)
        rte_tel_data_add_array_int(d, i);

    return 0;
}
static int
handle_queue_links(const char *cmd __rte_unused,
           const char *params,
           struct rte_tel_data *d)
{
    int i, ret, port_id = 0;
    char *end_param;
    uint8_t dev_id;
    uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
    uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
    const char *p_param;

    if (params == NULL || strlen(params) == 0 || !isdigit(*params))
        return -1;

    /* Get dev ID from parameter string */
    dev_id = strtoul(params, &end_param, 10);
    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

    p_param = strtok(end_param, ",");
    if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
        return -1;

    port_id = strtoul(p_param, &end_param, 10);
    p_param = strtok(NULL, "\0");
    if (p_param != NULL)
        RTE_EDEV_LOG_DEBUG(
            "Extra parameters passed to eventdev telemetry command, ignoring");

    ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
    if (ret < 0)
        return -1;

    rte_tel_data_start_dict(d);
    for (i = 0; i < ret; i++) {
        char qid_name[32];

        snprintf(qid_name, 31, "qid_%u", queues[i]);
        rte_tel_data_add_dict_u64(d, qid_name, priorities[i]);
    }

    return 0;
}
static int
eventdev_build_telemetry_data(int dev_id,
                  enum rte_event_dev_xstats_mode mode,
                  int port_queue_id,
                  struct rte_tel_data *d)
{
    struct rte_event_dev_xstats_name *xstat_names;
    unsigned int *ids;
    uint64_t *values;
    int i, ret, num_xstats;

    num_xstats = rte_event_dev_xstats_names_get(dev_id,
                            mode,
                            port_queue_id,
                            NULL,
                            NULL,
                            0);

    if (num_xstats < 0)
        return -1;

    /* use one malloc for names */
    xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
                 * num_xstats);
    if (xstat_names == NULL)
        return -1;

    ids = malloc((sizeof(unsigned int)) * num_xstats);
    if (ids == NULL) {
        free(xstat_names);
        return -1;
    }

    values = malloc((sizeof(uint64_t)) * num_xstats);
    if (values == NULL) {
        free(xstat_names);
        free(ids);
        return -1;
    }

    ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
                         xstat_names, ids, num_xstats);
    if (ret < 0 || ret > num_xstats) {
        free(xstat_names);
        free(ids);
        free(values);
        return -1;
    }

    ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
                       ids, values, num_xstats);
    if (ret < 0 || ret > num_xstats) {
        free(xstat_names);
        free(ids);
        free(values);
        return -1;
    }

    rte_tel_data_start_dict(d);
    for (i = 0; i < num_xstats; i++)
        rte_tel_data_add_dict_u64(d, xstat_names[i].name,
                      values[i]);

    free(xstat_names);
    free(ids);
    free(values);
    return 0;
}
static int
handle_dev_xstats(const char *cmd __rte_unused,
          const char *params,
          struct rte_tel_data *d)
{
    int dev_id;
    enum rte_event_dev_xstats_mode mode;
    char *end_param;

    if (params == NULL || strlen(params) == 0 || !isdigit(*params))
        return -1;

    /* Get dev ID from parameter string */
    dev_id = strtoul(params, &end_param, 10);
    if (*end_param != '\0')
        RTE_EDEV_LOG_DEBUG(
            "Extra parameters passed to eventdev telemetry command, ignoring");

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

    mode = RTE_EVENT_DEV_XSTATS_DEVICE;
    return eventdev_build_telemetry_data(dev_id, mode, 0, d);
}
static int
handle_port_xstats(const char *cmd __rte_unused,
           const char *params,
           struct rte_tel_data *d)
{
    int dev_id;
    int port_queue_id = 0;
    enum rte_event_dev_xstats_mode mode;
    char *end_param;
    const char *p_param;

    if (params == NULL || strlen(params) == 0 || !isdigit(*params))
        return -1;

    /* Get dev ID from parameter string */
    dev_id = strtoul(params, &end_param, 10);
    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

    p_param = strtok(end_param, ",");
    mode = RTE_EVENT_DEV_XSTATS_PORT;

    if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
        return -1;

    port_queue_id = strtoul(p_param, &end_param, 10);

    p_param = strtok(NULL, "\0");
    if (p_param != NULL)
        RTE_EDEV_LOG_DEBUG(
            "Extra parameters passed to eventdev telemetry command, ignoring");

    return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
}
static int
handle_queue_xstats(const char *cmd __rte_unused,
            const char *params,
            struct rte_tel_data *d)
{
    int dev_id;
    int port_queue_id = 0;
    enum rte_event_dev_xstats_mode mode;
    char *end_param;
    const char *p_param;

    if (params == NULL || strlen(params) == 0 || !isdigit(*params))
        return -1;

    /* Get dev ID from parameter string */
    dev_id = strtoul(params, &end_param, 10);
    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

    p_param = strtok(end_param, ",");
    mode = RTE_EVENT_DEV_XSTATS_QUEUE;

    if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
        return -1;

    port_queue_id = strtoul(p_param, &end_param, 10);

    p_param = strtok(NULL, "\0");
    if (p_param != NULL)
        RTE_EDEV_LOG_DEBUG(
            "Extra parameters passed to eventdev telemetry command, ignoring");

    return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
}
RTE_INIT(eventdev_init_telemetry)
{
    rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
            "Returns list of available eventdevs. Takes no parameters");
    rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
            "Returns list of available ports. Parameter: DevID");
    rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
            "Returns list of available queues. Parameter: DevID");

    rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
            "Returns stats for an eventdev. Parameter: DevID");
    rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
            "Returns stats for an eventdev port. Params: DevID,PortID");
    rte_telemetry_register_cmd("/eventdev/queue_xstats",
            handle_queue_xstats,
            "Returns stats for an eventdev queue. Params: DevID,QueueID");
    rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
            "Returns links for an eventdev port. Params: DevID,PortID");
}