/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <ethdev_driver.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_telemetry.h>

#include "rte_eventdev.h"
#include "eventdev_pmd.h"
#include "eventdev_trace.h"
static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = rte_event_devices;

static struct rte_eventdev_global eventdev_globals = {
	.nb_devs		= 0
};

/* Public fastpath APIs. */
struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
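/*
 * Note: rte_event_fp_ops is indexed by device ID from the inline
 * enqueue/dequeue wrappers in rte_eventdev.h. Each entry is populated by
 * event_dev_fp_ops_set() when a device starts (and at probe completion)
 * and reverted to the dummy callbacks by event_dev_fp_ops_reset() on
 * reconfigure/stop/close, so fast-path calls never dereference stale
 * driver pointers.
 */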
/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
	return eventdev_globals.nb_devs;
}
int
rte_event_dev_get_dev_id(const char *name)
{
	int i;
	uint8_t cmp;

	if (!name)
		return -EINVAL;

	for (i = 0; i < eventdev_globals.nb_devs; i++) {
		cmp = (strncmp(rte_event_devices[i].data->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
			(rte_event_devices[i].dev ? (strncmp(
				rte_event_devices[i].dev->driver->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
		if (cmp && (rte_event_devices[i].attached ==
				RTE_EVENTDEV_ATTACHED))
			return i;
	}
	return -ENODEV;
}
int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	return dev->data->socket_id;
}
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

	dev_info->dev = dev->dev;
	return 0;
}
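/*
 * Usage sketch (illustrative only, not part of this file):
 *
 *	struct rte_event_dev_info info;
 *
 *	if (rte_event_dev_info_get(dev_id, &info) == 0)
 *		printf("driver %s: max queues %u, max ports %u\n",
 *		       info.driver_name, info.max_event_queues,
 *		       info.max_event_ports);
 */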
int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->eth_rx_adapter_caps_get == NULL)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = 0;

	return dev->dev_ops->eth_rx_adapter_caps_get ?
			(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
					&rte_eth_devices[eth_port_id],
					caps)
			: 0;
}
int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
{
	struct rte_eventdev *dev;
	const struct event_timer_adapter_ops *ops;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;
	*caps = 0;

	return dev->dev_ops->timer_adapter_caps_get ?
			(*dev->dev_ops->timer_adapter_caps_get)(dev, 0,
								caps, &ops)
			: 0;
}
int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
				  uint32_t *caps)
{
	struct rte_eventdev *dev;
	struct rte_cryptodev *cdev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (!rte_cryptodev_is_valid_dev(cdev_id))
		return -EINVAL;

	dev = &rte_eventdevs[dev_id];
	cdev = rte_cryptodev_pmd_get_dev(cdev_id);

	if (caps == NULL)
		return -EINVAL;

	*caps = 0;

	return dev->dev_ops->crypto_adapter_caps_get ?
		(*dev->dev_ops->crypto_adapter_caps_get)
		(dev, cdev, caps) : -ENOTSUP;
}
int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;
	struct rte_eth_dev *eth_dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];
	eth_dev = &rte_eth_devices[eth_port_id];

	if (caps == NULL)
		return -EINVAL;

	*caps = 0;

	if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;

	return dev->dev_ops->eth_tx_adapter_caps_get ?
			(*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
								eth_dev,
								caps)
			: 0;
}
static inline int
event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	struct rte_event_queue_conf *queues_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			 dev->data->dev_id);

	if (nb_queues != 0) {
		queues_cfg = dev->data->queues_cfg;
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_cfg + old_nb_queues, 0,
				sizeof(queues_cfg[0]) * new_qs);
		}
	} else {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}
#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
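/*
 * links_map entries hold the service priority of a linked queue; the
 * out-of-range value above marks a port/queue pair as "not linked".
 * rte_event_port_unlink() writes it back and rte_event_port_links_get()
 * skips entries carrying it.
 */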
static inline int
event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
	uint8_t old_nb_ports = dev->data->nb_ports;
	void **ports;
	uint16_t *links_map;
	struct rte_event_port_conf *ports_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
			 dev->data->dev_id);

	if (nb_ports != 0) { /* re-config */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		ports_cfg = dev->data->ports_cfg;
		links_map = dev->data->links_map;

		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);

		if (nb_ports > old_nb_ports) {
			uint8_t new_ps = nb_ports - old_nb_ports;
			unsigned int old_links_map_end =
				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
			unsigned int links_map_end =
				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

			memset(ports + old_nb_ports, 0,
				sizeof(ports[0]) * new_ps);
			memset(ports_cfg + old_nb_ports, 0,
				sizeof(ports_cfg[0]) * new_ps);
			for (i = old_links_map_end; i < links_map_end; i++)
				links_map[i] =
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
		}
	} else {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		for (i = nb_ports; i < old_nb_ports; i++) {
			(*dev->dev_ops->port_release)(ports[i]);
			ports[i] = NULL;
		}
	}

	dev->data->nb_ports = nb_ports;
	return 0;
}
int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_event_dev_info info;
	struct rte_eventdev *dev;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check dequeue_timeout_ns value is in limit */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns &&
		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				info.max_dequeue_timeout_ns)) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check nb_events_limit is in limit */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
		dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check nb_event_queues is in limit */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues +
			info.max_single_link_event_port_queue_pairs) {
		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
				 dev_id, dev_conf->nb_event_queues,
				 info.max_event_queues,
				 info.max_single_link_event_port_queue_pairs);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues -
			dev_conf->nb_single_link_event_port_queues >
			info.max_event_queues) {
		RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
				 dev_id, dev_conf->nb_event_queues,
				 dev_conf->nb_single_link_event_port_queues,
				 info.max_event_queues);
		return -EINVAL;
	}
	if (dev_conf->nb_single_link_event_port_queues >
			dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
				 dev_id,
				 dev_conf->nb_single_link_event_port_queues,
				 dev_conf->nb_event_queues);
		return -EINVAL;
	}

	/* Check nb_event_ports is in limit */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports +
			info.max_single_link_event_port_queue_pairs) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
				 dev_id, dev_conf->nb_event_ports,
				 info.max_event_ports,
				 info.max_single_link_event_port_queue_pairs);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports -
			dev_conf->nb_single_link_event_port_queues
			> info.max_event_ports) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
				 dev_id, dev_conf->nb_event_ports,
				 dev_conf->nb_single_link_event_port_queues,
				 info.max_event_ports);
		return -EINVAL;
	}

	if (dev_conf->nb_single_link_event_port_queues >
	    dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR(
				 "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
				 dev_id,
				 dev_conf->nb_single_link_event_port_queues,
				 dev_conf->nb_event_ports);
		return -EINVAL;
	}

	/* Check nb_event_queue_flows is in limit */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
		dev_id, dev_conf->nb_event_queue_flows,
		info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check nb_event_port_dequeue_depth is in limit */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		 (dev_conf->nb_event_port_dequeue_depth >
			 info.max_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
		dev_id, dev_conf->nb_event_port_dequeue_depth,
		info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check nb_event_port_enqueue_depth is in limit */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_enqueue_depth >
			 info.max_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
		dev_id, dev_conf->nb_event_port_enqueue_depth,
		info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
				 diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
				 diag);
		return diag;
	}

	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
		event_dev_queue_config(dev, 0);
		event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	rte_eventdev_trace_configure(dev_id, dev_conf, diag);
	return diag;
}
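/*
 * Configuration sketch (illustrative only): derive a single-queue,
 * single-port configuration from the advertised limits.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	cfg.nb_event_queues = 1;
 *	cfg.nb_event_ports = 1;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	rte_event_dev_configure(dev_id, &cfg);
 */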
static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}
int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
	return 0;
}
static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ATOMIC)
		))
		return 1;
	else
		return 0;
}
static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ORDERED)
		))
		return 1;
	else
		return 0;
}
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

	if (queue_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	dev->data->queues_cfg[queue_id] = *queue_conf;
	rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}
int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
	return 0;
}
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				 dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (port_conf &&
	    (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
	    !(dev->data->event_dev_cap &
	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Implicit release disable not supported",
			dev_id, port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

	if (port_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_cfg[port_id] = *port_conf;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port(default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
	if (diag < 0)
		return diag;

	return 0;
}
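/*
 * Setup-order sketch (illustrative only): queues and ports are set up
 * between rte_event_dev_configure() and rte_event_dev_start(); passing
 * NULL selects the driver's default configuration.
 *
 *	rte_event_queue_setup(dev_id, 0, NULL);
 *	rte_event_port_setup(dev_id, 0, NULL);
 *	rte_event_port_link(dev_id, 0, NULL, NULL, 0);
 *	rte_event_dev_start(dev_id);
 */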
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	switch (attr_id) {
	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
		*attr_value = dev->data->nb_ports;
		break;
	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
		*attr_value = dev->data->nb_queues;
		break;
	case RTE_EVENT_DEV_ATTR_STARTED:
		*attr_value = dev->data->dev_started;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	switch (attr_id) {
	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
		break;
	case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
	{
		uint32_t config;

		config = dev->data->ports_cfg[port_id].event_port_cfg;
		*attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
		break;
	}
	default:
		return -EINVAL;
	};
	return 0;
}
int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_event_queue_conf *conf;
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	conf = &dev->data->queues_cfg[queue_id];

	switch (attr_id) {
	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->priority;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
		*attr_value = conf->nb_atomic_flows;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
		*attr_value = conf->nb_atomic_order_sequences;
		break;
	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
		*attr_value = conf->event_queue_cfg;
		break;
	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
			return -EOVERFLOW;

		*attr_value = conf->schedule_type;
		break;
	default:
		return -EINVAL;
	};
	return 0;
}
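/*
 * Unlike most APIs in this file, the link/unlink calls below return a
 * count of queues actually (un)linked. Parameter errors are reported by
 * setting rte_errno and returning 0, so the return value always remains
 * usable as a count; a negative driver return is passed through as-is.
 */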
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct rte_eventdev *dev;
	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_link == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = EINVAL;
		return 0;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			queues_list[i] = i;

		queues = queues_list;
		nb_links = dev->data->nb_queues;
	}

	if (priorities == NULL) {
		for (i = 0; i < nb_links; i++)
			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

		priorities = priorities_list;
	}

	for (i = 0; i < nb_links; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
						queues, priorities, nb_links);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = (uint8_t)priorities[i];

	rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
	return diag;
}
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct rte_eventdev *dev;
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	int i, diag, j;
	uint16_t *links_map;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_unlink == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = EINVAL;
		return 0;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);

	if (queues == NULL) {
		j = 0;
		for (i = 0; i < dev->data->nb_queues; i++) {
			if (links_map[i] !=
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
				all_queues[j] = i;
				j++;
			}
		}
		queues = all_queues;
	} else {
		for (j = 0; j < nb_unlinks; j++) {
			if (links_map[queues[j]] ==
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
				break;
		}
	}

	nb_unlinks = j;

	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
					queues, nb_unlinks);
	if (diag < 0)
		return diag;

	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
	return diag;
}
int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Return 0 if the PMD does not implement unlinks in progress.
	 * This allows PMDs which handle unlink synchronously to not implement
	 * this function at all.
	 */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);

	return (*dev->dev_ops->port_unlinks_in_progress)(dev,
			dev->data->ports[port_id]);
}
int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}
	return count;
}
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

	if (timeout_ticks == NULL)
		return -EINVAL;

	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}
int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (service_id == NULL)
		return -EINVAL;

	if (dev->data->service_inited)
		*service_id = dev->data->service_id;

	return dev->data->service_inited ? 0 : -ESRCH;
}
int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
	if (f == NULL)
		return -EINVAL;

	(*dev->dev_ops->dump)(dev, f);
	return 0;
}
static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
							queue_port_id,
							NULL, NULL, 0);
	return 0;
}
int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
							  queue_port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* dev_id checked above */
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
				queue_port_id, xstats_names, ids, size);

	return -ENOTSUP;
}
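/*
 * Usage sketch (illustrative only): the xstats calls follow the usual
 * two-pass pattern -- size query first, then fill caller-owned arrays.
 *
 *	int n = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *	// allocate n entries for names/ids/values, then call
 *	// rte_event_dev_xstats_names_get() and rte_event_dev_xstats_get()
 *	// again with the buffers (see eventdev_build_telemetry_data()).
 */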
/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
				ids, values, n);
	return -ENOTSUP;
}
uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		unsigned int *id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	unsigned int temp = -1;

	if (id != NULL)
		*id = (unsigned int)-1;
	else
		id = &temp; /* ensure driver never gets a NULL value */

	/* implemented by driver */
	if (dev->dev_ops->xstats_get_by_name != NULL)
		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
	return -ENOTSUP;
}
int rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_reset != NULL)
		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
							ids, nb_ids);
	return -ENOTSUP;
}
int rte_event_pmd_selftest_seqn_dynfield_offset = -1;

int rte_event_dev_selftest(uint8_t dev_id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
		.name = "rte_event_pmd_selftest_seqn_dynfield",
		.size = sizeof(rte_event_pmd_selftest_seqn_t),
		.align = __alignof__(rte_event_pmd_selftest_seqn_t),
	};
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->dev_selftest != NULL) {
		rte_event_pmd_selftest_seqn_dynfield_offset =
			rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
		if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
			return -ENOMEM;
		return (*dev->dev_ops->dev_selftest)();
	}
	return -ENOTSUP;
}
struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
			     unsigned int cache_size, uint16_t nb_elem,
			     int socket_id)
{
	const char *mp_ops_name;
	struct rte_mempool *mp;
	unsigned int elt_sz;
	int ret;

	if (!nb_elem) {
		RTE_LOG(ERR, EVENTDEV,
			"Invalid number of elements=%d requested\n", nb_elem);
		rte_errno = EINVAL;
		return NULL;
	}

	elt_sz =
		sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
	mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
				      0);
	if (mp == NULL)
		return NULL;

	mp_ops_name = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
		goto err;
	}

	ret = rte_mempool_populate_default(mp);
	if (ret < 0)
		goto err;

	return mp;
err:
	rte_mempool_free(mp);
	rte_errno = -ret;
	return NULL;
}
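/*
 * The pool created above holds struct rte_event_vector elements sized for
 * nb_elem object pointers each; event adapters and applications use such
 * pools to aggregate multiple packets or objects into a single vector
 * event.
 */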
int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	rte_eventdev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);

	return 0;
}
int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
		eventdev_stop_flush_t callback, void *userdata)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	dev->dev_ops->dev_stop_flush = callback;
	dev->data->dev_stop_flush_arg = userdata;

	return 0;
}
void
rte_event_dev_stop(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
	rte_eventdev_trace_stop(dev_id);
	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
}
int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
	rte_eventdev_trace_close(dev_id);
	return (*dev->dev_ops->dev_close)(dev);
}
static inline int
eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		    int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		memset(*data, 0, sizeof(struct rte_eventdev_data));
		for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
					RTE_EVENT_MAX_QUEUES_PER_DEV;
		     n++)
			(*data)->links_map[n] =
				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
	}

	return 0;
}
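/*
 * The device data memzone is reserved once by the primary process and
 * only looked up by secondaries, so both process types end up sharing
 * the same rte_eventdev_data (including links_map) by name.
 */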
static inline uint8_t
eventdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_DETACHED)
			return dev_id;
	}
	return RTE_EVENT_MAX_DEVS;
}
struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;
	uint8_t dev_id;

	if (rte_event_pmd_get_named_dev(name) != NULL) {
		RTE_EDEV_LOG_ERR("Event device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = eventdev_find_free_device_index();
	if (dev_id == RTE_EVENT_MAX_DEVS) {
		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
		return NULL;
	}

	eventdev = &rte_eventdevs[dev_id];

	if (eventdev->data == NULL) {
		struct rte_eventdev_data *eventdev_data = NULL;

		int retval =
			eventdev_data_alloc(dev_id, &eventdev_data, socket_id);

		if (retval < 0 || eventdev_data == NULL)
			return NULL;

		eventdev->data = eventdev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {

			strlcpy(eventdev->data->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN);

			eventdev->data->dev_id = dev_id;
			eventdev->data->socket_id = socket_id;
			eventdev->data->dev_started = 0;
		}
	}

	eventdev->attached = RTE_EVENTDEV_ATTACHED;
	eventdev_globals.nb_devs++;

	return eventdev;
}
int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
				eventdev->data->dev_id);
		if (ret >= (int)sizeof(mz_name))
			return -EINVAL;

		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL)
			return -ENOMEM;

		ret = rte_memzone_free(mz);
		if (ret)
			return ret;
	}

	eventdev->data = NULL;
	return 0;
}
void
event_dev_probing_finish(struct rte_eventdev *eventdev)
{
	if (eventdev == NULL)
		return;

	event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
			     eventdev);
}
static int
handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	uint8_t dev_id;
	int ndev = rte_event_dev_count();

	if (ndev < 1)
		return -1;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_ATTACHED)
			rte_tel_data_add_array_int(d, dev_id);
	}

	return 0;
}
static int
handle_port_list(const char *cmd __rte_unused,
		 const char *params,
		 struct rte_tel_data *d)
{
	int i;
	uint8_t dev_id;
	struct rte_eventdev *dev;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (i = 0; i < dev->data->nb_ports; i++)
		rte_tel_data_add_array_int(d, i);

	return 0;
}
static int
handle_queue_list(const char *cmd __rte_unused,
		  const char *params,
		  struct rte_tel_data *d)
{
	int i;
	uint8_t dev_id;
	struct rte_eventdev *dev;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (i = 0; i < dev->data->nb_queues; i++)
		rte_tel_data_add_array_int(d, i);

	return 0;
}
static int
handle_queue_links(const char *cmd __rte_unused,
		   const char *params,
		   struct rte_tel_data *d)
{
	int i, ret, port_id = 0;
	char *end_param;
	uint8_t dev_id;
	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_id = strtoul(p_param, &end_param, 10);
	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	for (i = 0; i < ret; i++) {
		char qid_name[32];

		snprintf(qid_name, 31, "qid_%u", queues[i]);
		rte_tel_data_add_dict_u64(d, qid_name, priorities[i]);
	}

	return 0;
}
static int
eventdev_build_telemetry_data(int dev_id,
			      enum rte_event_dev_xstats_mode mode,
			      int port_queue_id,
			      struct rte_tel_data *d)
{
	struct rte_event_dev_xstats_name *xstat_names;
	unsigned int *ids;
	uint64_t *values;
	int i, ret, num_xstats;

	num_xstats = rte_event_dev_xstats_names_get(dev_id,
						    mode,
						    port_queue_id,
						    NULL,
						    NULL,
						    0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for names */
	xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
			     * num_xstats);
	if (xstat_names == NULL)
		return -1;

	ids = malloc((sizeof(unsigned int)) * num_xstats);
	if (ids == NULL) {
		free(xstat_names);
		return -1;
	}

	values = malloc((sizeof(uint64_t)) * num_xstats);
	if (values == NULL) {
		free(xstat_names);
		free(ids);
		return -1;
	}

	ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
					     xstat_names, ids, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(xstat_names);
		free(ids);
		free(values);
		return -1;
	}

	ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
				       ids, values, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(xstat_names);
		free(ids);
		free(values);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
					  values[i]);

	free(xstat_names);
	free(ids);
	free(values);
	return 0;
}
static int
handle_dev_xstats(const char *cmd __rte_unused,
		  const char *params,
		  struct rte_tel_data *d)
{
	int dev_id;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	mode = RTE_EVENT_DEV_XSTATS_DEVICE;
	return eventdev_build_telemetry_data(dev_id, mode, 0, d);
}
static int
handle_port_xstats(const char *cmd __rte_unused,
		   const char *params,
		   struct rte_tel_data *d)
{
	int dev_id;
	int port_queue_id = 0;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	mode = RTE_EVENT_DEV_XSTATS_PORT;

	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_queue_id = strtoul(p_param, &end_param, 10);

	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
}
static int
handle_queue_xstats(const char *cmd __rte_unused,
		    const char *params,
		    struct rte_tel_data *d)
{
	int dev_id;
	int port_queue_id = 0;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	mode = RTE_EVENT_DEV_XSTATS_QUEUE;

	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_queue_id = strtoul(p_param, &end_param, 10);

	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
}
RTE_INIT(eventdev_init_telemetry)
{
	rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
			"Returns list of available eventdevs. Takes no parameters");
	rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
			"Returns list of available ports. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
			"Returns list of available queues. Parameter: DevID");

	rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
			"Returns stats for an eventdev. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
			"Returns stats for an eventdev port. Params: DevID,PortID");
	rte_telemetry_register_cmd("/eventdev/queue_xstats",
			handle_queue_xstats,
			"Returns stats for an eventdev queue. Params: DevID,QueueID");
	rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
			"Returns links for an eventdev port. Params: DevID,PortID");
}