/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_log.h>
#include <rte_dev.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <ethdev_driver.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_telemetry.h>

#include "rte_eventdev.h"
#include "eventdev_pmd.h"
#include "eventdev_trace.h"

static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

/* Exported pointer to the event device array. */
struct rte_eventdev *rte_eventdevs = rte_event_devices;

static struct rte_eventdev_global eventdev_globals = {
	.nb_devs		= 0
};

/* Public fastpath APIs. */
struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];

/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
	return eventdev_globals.nb_devs;
}

int
rte_event_dev_get_dev_id(const char *name)
{
	int i;
	uint8_t cmp;

	if (!name)
		return -EINVAL;

	for (i = 0; i < eventdev_globals.nb_devs; i++) {
		cmp = (strncmp(rte_event_devices[i].data->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
			(rte_event_devices[i].dev ? (strncmp(
				rte_event_devices[i].dev->driver->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
		if (cmp && (rte_event_devices[i].attached ==
				RTE_EVENTDEV_ATTACHED))
			return i;
	}
	return -ENODEV;
}

int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	return dev->data->socket_id;
}

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

	dev_info->dev = dev->dev;
	return 0;
}
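
/*
 * Usage sketch (editorial addition, not part of the upstream file): querying
 * device limits before configuration. The dev_id value is a placeholder.
 *
 *	struct rte_event_dev_info info;
 *
 *	if (rte_event_dev_info_get(dev_id, &info) == 0)
 *		printf("max queues %u, max ports %u\n",
 *		       info.max_event_queues, info.max_event_ports);
 */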

int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->eth_rx_adapter_caps_get == NULL)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = 0;

	return dev->dev_ops->eth_rx_adapter_caps_get ?
				(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
						&rte_eth_devices[eth_port_id],
						caps)
				: 0;
}

int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
{
	struct rte_eventdev *dev;
	const struct event_timer_adapter_ops *ops;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;

	*caps = 0;

	return dev->dev_ops->timer_adapter_caps_get ?
				(*dev->dev_ops->timer_adapter_caps_get)(dev,
									0,
									caps,
									&ops)
				: 0;
}

int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
				  uint32_t *caps)
{
	struct rte_eventdev *dev;
	struct rte_cryptodev *cdev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (!rte_cryptodev_is_valid_dev(cdev_id))
		return -EINVAL;

	dev = &rte_eventdevs[dev_id];
	cdev = rte_cryptodev_pmd_get_dev(cdev_id);

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->crypto_adapter_caps_get == NULL)
		*caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
	else
		*caps = 0;

	return dev->dev_ops->crypto_adapter_caps_get ?
		(*dev->dev_ops->crypto_adapter_caps_get)
		(dev, cdev, caps) : 0;
}

int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;
	struct rte_eth_dev *eth_dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];
	eth_dev = &rte_eth_devices[eth_port_id];

	if (caps == NULL)
		return -EINVAL;

	if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
	else
		*caps = 0;

	return dev->dev_ops->eth_tx_adapter_caps_get ?
			(*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
								eth_dev,
								caps)
			: 0;
}
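
/*
 * Usage sketch (editorial addition): the caps-query pattern shared by the
 * adapters above. Here the Rx adapter caps decide whether packet transfer
 * needs a software service core; dev_id, eth_port and the boolean flag are
 * placeholders.
 *
 *	uint32_t caps = 0;
 *
 *	if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port, &caps) == 0 &&
 *	    !(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
 *		need_rx_service_core = true;
 */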

static inline int
event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	struct rte_event_queue_conf *queues_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			 dev->data->dev_id);

	if (nb_queues != 0) {
		queues_cfg = dev->data->queues_cfg;
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_cfg + old_nb_queues, 0,
				sizeof(queues_cfg[0]) * new_qs);
		}
	} else {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}

#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)

static inline int
event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
	uint8_t old_nb_ports = dev->data->nb_ports;
	void **ports;
	uint16_t *links_map;
	struct rte_event_port_conf *ports_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
			 dev->data->dev_id);

	if (nb_ports != 0) { /* re-config */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		ports_cfg = dev->data->ports_cfg;
		links_map = dev->data->links_map;

		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);

		if (nb_ports > old_nb_ports) {
			uint8_t new_ps = nb_ports - old_nb_ports;
			unsigned int old_links_map_end =
				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
			unsigned int links_map_end =
				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

			memset(ports + old_nb_ports, 0,
				sizeof(ports[0]) * new_ps);
			memset(ports_cfg + old_nb_ports, 0,
				sizeof(ports_cfg[0]) * new_ps);
			for (i = old_links_map_end; i < links_map_end; i++)
				links_map[i] =
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
		}
	} else {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		for (i = nb_ports; i < old_nb_ports; i++) {
			(*dev->dev_ops->port_release)(ports[i]);
			ports[i] = NULL;
		}
	}

	dev->data->nb_ports = nb_ports;
	return 0;
}

int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_event_dev_info info;
	struct rte_eventdev *dev;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check dequeue_timeout_ns value is in limit */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns &&
		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				info.max_dequeue_timeout_ns)) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check nb_events_limit is in limit */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
		dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check nb_event_queues is in limit */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues +
			info.max_single_link_event_port_queue_pairs) {
		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
				 dev_id, dev_conf->nb_event_queues,
				 info.max_event_queues,
				 info.max_single_link_event_port_queue_pairs);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues -
			dev_conf->nb_single_link_event_port_queues >
			info.max_event_queues) {
		RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
				 dev_id, dev_conf->nb_event_queues,
				 dev_conf->nb_single_link_event_port_queues,
				 info.max_event_queues);
		return -EINVAL;
	}
	if (dev_conf->nb_single_link_event_port_queues >
			dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
				 dev_id,
				 dev_conf->nb_single_link_event_port_queues,
				 dev_conf->nb_event_queues);
		return -EINVAL;
	}

	/* Check nb_event_ports is in limit */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports +
			info.max_single_link_event_port_queue_pairs) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
				 dev_id, dev_conf->nb_event_ports,
				 info.max_event_ports,
				 info.max_single_link_event_port_queue_pairs);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports -
			dev_conf->nb_single_link_event_port_queues
			> info.max_event_ports) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
				 dev_id, dev_conf->nb_event_ports,
				 dev_conf->nb_single_link_event_port_queues,
				 info.max_event_ports);
		return -EINVAL;
	}
	if (dev_conf->nb_single_link_event_port_queues >
	    dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR(
			"dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
			dev_id,
			dev_conf->nb_single_link_event_port_queues,
			dev_conf->nb_event_ports);
		return -EINVAL;
	}

	/* Check nb_event_queue_flows is in limit */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
		dev_id, dev_conf->nb_event_queue_flows,
		info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check nb_event_port_dequeue_depth is in limit */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		 (dev_conf->nb_event_port_dequeue_depth >
			 info.max_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
		dev_id, dev_conf->nb_event_port_dequeue_depth,
		info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check nb_event_port_enqueue_depth is in limit */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_enqueue_depth >
			 info.max_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
		dev_id, dev_conf->nb_event_port_enqueue_depth,
		info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
				 diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
				 diag);
		return diag;
	}

	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
		event_dev_queue_config(dev, 0);
		event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	rte_eventdev_trace_configure(dev_id, dev_conf, diag);
	return diag;
}
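
/*
 * Usage sketch (editorial addition): a minimal single-queue, single-port
 * configuration derived from the device limits, mirroring the checks above.
 * Error handling is elided and dev_id is a placeholder.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *	int ret;
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	cfg.nb_event_queues = 1;
 *	cfg.nb_event_ports = 1;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	ret = rte_event_dev_configure(dev_id, &cfg);
 */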

static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
	return 0;
}

static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ATOMIC)
		))
		return 1;
	else
		return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ORDERED)
		))
		return 1;
	else
		return 0;
}

int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

	if (queue_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	dev->data->queues_cfg[queue_id] = *queue_conf;
	rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
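
/*
 * Usage sketch (editorial addition): setting up queue 0 as an atomic queue.
 * Both flow counts must stay within the configured nb_event_queue_flows per
 * the checks above; the values here are placeholders.
 *
 *	struct rte_event_queue_conf qconf = {
 *		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		.nb_atomic_flows = 1024,
 *		.nb_atomic_order_sequences = 1024,
 *	};
 *
 *	ret = rte_event_queue_setup(dev_id, 0, &qconf);
 */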

static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
	return 0;
}

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				 dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (port_conf &&
	    (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
	    !(dev->data->event_dev_cap &
	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Implicit release disable not supported",
			dev_id, port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

	if (port_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_cfg[port_id] = *port_conf;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port(default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
	if (diag < 0)
		return diag;

	return 0;
}
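
/*
 * Usage sketch (editorial addition): starting from the driver defaults and
 * overriding the dequeue depth, which must stay within the configured
 * nb_event_port_dequeue_depth per the checks above. Passing NULL instead of
 * a conf selects the defaults unchanged.
 *
 *	struct rte_event_port_conf pconf;
 *
 *	rte_event_port_default_conf_get(dev_id, port_id, &pconf);
 *	pconf.dequeue_depth = 16;
 *	ret = rte_event_port_setup(dev_id, port_id, &pconf);
 */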

void
rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
		       rte_eventdev_port_flush_t release_cb, void *args)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return;
	}

	if (dev->dev_ops->port_quiesce)
		(*dev->dev_ops->port_quiesce)(dev, dev->data->ports[port_id],
					      release_cb, args);
}

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	switch (attr_id) {
	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
		*attr_value = dev->data->nb_ports;
		break;
	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
		*attr_value = dev->data->nb_queues;
		break;
	case RTE_EVENT_DEV_ATTR_STARTED:
		*attr_value = dev->data->dev_started;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	switch (attr_id) {
	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
		break;
	case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
	{
		uint32_t config;

		config = dev->data->ports_cfg[port_id].event_port_cfg;
		*attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_event_queue_conf *conf;
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	conf = &dev->data->queues_cfg[queue_id];

	switch (attr_id) {
	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->priority;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
		*attr_value = conf->nb_atomic_flows;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
		*attr_value = conf->nb_atomic_order_sequences;
		break;
	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
		*attr_value = conf->event_queue_cfg;
		break;
	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
			return -EOVERFLOW;

		*attr_value = conf->schedule_type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
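
/*
 * Usage sketch (editorial addition): reading back a queue attribute. For an
 * RTE_EVENT_QUEUE_CFG_ALL_TYPES queue the schedule type is carried per event,
 * so the getter above returns -EOVERFLOW; handle_all_types_queue() is a
 * placeholder for that path.
 *
 *	uint32_t sched_type;
 *	int ret;
 *
 *	ret = rte_event_queue_attr_get(dev_id, queue_id,
 *			RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE, &sched_type);
 *	if (ret == -EOVERFLOW)
 *		handle_all_types_queue();
 */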

int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct rte_eventdev *dev;
	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_link == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = EINVAL;
		return 0;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			queues_list[i] = i;

		queues = queues_list;
		nb_links = dev->data->nb_queues;
	}

	if (priorities == NULL) {
		for (i = 0; i < nb_links; i++)
			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

		priorities = priorities_list;
	}

	for (i = 0; i < nb_links; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
						queues, priorities, nb_links);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = (uint8_t)priorities[i];

	rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
	return diag;
}
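
/*
 * Usage sketch (editorial addition): linking a port to two queues with
 * distinct priorities. Passing NULL for queues/priorities would instead link
 * all configured queues at normal priority, as handled above.
 *
 *	const uint8_t queues[] = {0, 1};
 *	const uint8_t prios[] = {RTE_EVENT_DEV_PRIORITY_HIGHEST,
 *				 RTE_EVENT_DEV_PRIORITY_NORMAL};
 *
 *	if (rte_event_port_link(dev_id, port_id, queues, prios, 2) != 2)
 *		rte_panic("port link failed: %d\n", rte_errno);
 */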

int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct rte_eventdev *dev;
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	int i, diag, j;
	uint16_t *links_map;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_unlink == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = EINVAL;
		return 0;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);

	if (queues == NULL) {
		j = 0;
		for (i = 0; i < dev->data->nb_queues; i++) {
			if (links_map[i] !=
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
				all_queues[j] = i;
				j++;
			}
		}
		queues = all_queues;
	} else {
		for (j = 0; j < nb_unlinks; j++) {
			if (links_map[queues[j]] ==
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
				break;
		}
	}

	nb_unlinks = j;
	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
					queues, nb_unlinks);

	if (diag < 0)
		return diag;

	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
	return diag;
}

int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Return 0 if the PMD does not implement unlinks in progress.
	 * This allows PMDs which handle unlink synchronously to not implement
	 * this function at all.
	 */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);

	return (*dev->dev_ops->port_unlinks_in_progress)(dev,
			dev->data->ports[port_id]);
}

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}
	return count;
}

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

	if (timeout_ticks == NULL)
		return -EINVAL;

	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}
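
/*
 * Usage sketch (editorial addition): converting a 100 us timeout into the
 * device's tick units for use with rte_event_dequeue_burst(). dev_id and
 * port_id are placeholders.
 *
 *	struct rte_event evs[32];
 *	uint64_t ticks;
 *	uint16_t nb;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) == 0)
 *		nb = rte_event_dequeue_burst(dev_id, port_id, evs,
 *					     RTE_DIM(evs), ticks);
 */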

int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (service_id == NULL)
		return -EINVAL;

	if (dev->data->service_inited)
		*service_id = dev->data->service_id;

	return dev->data->service_inited ? 0 : -ESRCH;
}
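
/*
 * Usage sketch (editorial addition): software PMDs (e.g. event_sw) expose
 * their scheduler as a service that must run on a service lcore. Assumes
 * rte_service.h; slcore_id is a placeholder.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		rte_service_lcore_add(slcore_id);
 *		rte_service_map_lcore_set(service_id, slcore_id, 1);
 *		rte_service_runstate_set(service_id, 1);
 *		rte_service_lcore_start(slcore_id);
 *	}
 */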

int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
	if (f == NULL)
		return -EINVAL;

	(*dev->dev_ops->dump)(dev, f);
	return 0;
}

static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
							queue_port_id,
							NULL, NULL, 0);
	return 0;
}

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
							  queue_port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* dev_id checked above */
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
				queue_port_id, xstats_names, ids, size);

	return -ENOTSUP;
}

/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
				ids, values, n);
	return -ENOTSUP;
}

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		unsigned int *id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	unsigned int temp = -1;

	if (id != NULL)
		*id = (unsigned int)-1;
	else
		id = &temp; /* ensure driver never gets a NULL value */

	/* implemented by driver */
	if (dev->dev_ops->xstats_get_by_name != NULL)
		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
	return -ENOTSUP;
}
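
/*
 * Usage sketch (editorial addition): fetching one counter by name. A 0
 * return on an invalid dev_id is indistinguishable from a zero-valued
 * counter, so check the id out-parameter instead. xstat_name is a
 * placeholder; valid names come from rte_event_dev_xstats_names_get().
 *
 *	unsigned int id;
 *	uint64_t value;
 *
 *	value = rte_event_dev_xstats_by_name_get(dev_id, xstat_name, &id);
 *	if (id != (unsigned int)-1)
 *		printf("%s = %" PRIu64 "\n", xstat_name, value);
 */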

int rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_reset != NULL)
		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
							ids, nb_ids);
	return -ENOTSUP;
}

int rte_event_pmd_selftest_seqn_dynfield_offset = -1;

int rte_event_dev_selftest(uint8_t dev_id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
		.name = "rte_event_pmd_selftest_seqn_dynfield",
		.size = sizeof(rte_event_pmd_selftest_seqn_t),
		.align = __alignof__(rte_event_pmd_selftest_seqn_t),
	};
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->dev_selftest != NULL) {
		rte_event_pmd_selftest_seqn_dynfield_offset =
			rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
		if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
			return -ENOMEM;
		return (*dev->dev_ops->dev_selftest)();
	}
	return -ENOTSUP;
}

struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
			     unsigned int cache_size, uint16_t nb_elem,
			     int socket_id)
{
	const char *mp_ops_name;
	struct rte_mempool *mp;
	unsigned int elt_sz;
	int ret;

	if (!nb_elem) {
		RTE_LOG(ERR, EVENTDEV,
			"Invalid number of elements=%d requested\n", nb_elem);
		rte_errno = EINVAL;
		return NULL;
	}

	elt_sz =
		sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
	mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
				      0);
	if (mp == NULL)
		return NULL;

	mp_ops_name = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
		goto err;
	}

	ret = rte_mempool_populate_default(mp);
	if (ret < 0)
		goto err;

	return mp;
err:
	rte_mempool_free(mp);
	rte_errno = -ret;
	return NULL;
}
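
/*
 * Usage sketch (editorial addition): creating a pool of event vectors that
 * each aggregate up to 64 object pointers, for use with adapter event
 * vectorization. The pool name and sizes are placeholders.
 *
 *	struct rte_mempool *vp;
 *
 *	vp = rte_event_vector_pool_create("ev_vec_pool", 16384, 256, 64,
 *					  rte_socket_id());
 *	if (vp == NULL)
 *		rte_panic("vector pool creation failed: %d\n", rte_errno);
 */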

int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	rte_eventdev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);

	return 0;
}

int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
		eventdev_stop_flush_t callback, void *userdata)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	dev->dev_ops->dev_stop_flush = callback;
	dev->data->dev_stop_flush_arg = userdata;

	return 0;
}

void
rte_event_dev_stop(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
	rte_eventdev_trace_stop(dev_id);
	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
}
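
/*
 * Usage sketch (editorial addition): the expected teardown order. Events
 * still held inside the device at stop time are handed to the stop-flush
 * callback registered above, if one was set.
 *
 *	rte_event_dev_stop(dev_id);
 *	if (rte_event_dev_close(dev_id) < 0)
 *		rte_panic("eventdev close failed\n");
 */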

int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
	rte_eventdev_trace_close(dev_id);
	return (*dev->dev_ops->dev_close)(dev);
}

static inline int
eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		    int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		memset(*data, 0, sizeof(struct rte_eventdev_data));
		for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
					RTE_EVENT_MAX_QUEUES_PER_DEV;
		     n++)
			(*data)->links_map[n] =
				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
	}

	return 0;
}

static inline uint8_t
eventdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_DETACHED)
			return dev_id;
	}
	return RTE_EVENT_MAX_DEVS;
}

struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;
	uint8_t dev_id;

	if (rte_event_pmd_get_named_dev(name) != NULL) {
		RTE_EDEV_LOG_ERR("Event device with name %s already allocated!",
				name);
		return NULL;
	}

	dev_id = eventdev_find_free_device_index();
	if (dev_id == RTE_EVENT_MAX_DEVS) {
		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
		return NULL;
	}

	eventdev = &rte_eventdevs[dev_id];

	if (eventdev->data == NULL) {
		struct rte_eventdev_data *eventdev_data = NULL;

		int retval =
			eventdev_data_alloc(dev_id, &eventdev_data, socket_id);

		if (retval < 0 || eventdev_data == NULL)
			return NULL;

		eventdev->data = eventdev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {

			strlcpy(eventdev->data->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN);

			eventdev->data->dev_id = dev_id;
			eventdev->data->socket_id = socket_id;
			eventdev->data->dev_started = 0;
		}

		eventdev->attached = RTE_EVENTDEV_ATTACHED;
		eventdev_globals.nb_devs++;
	}

	return eventdev;
}

int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
				eventdev->data->dev_id);
		if (ret >= (int)sizeof(mz_name))
			return -EINVAL;

		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL)
			return -ENOMEM;

		ret = rte_memzone_free(mz);
		if (ret)
			return ret;
	}

	eventdev->data = NULL;
	return 0;
}

void
event_dev_probing_finish(struct rte_eventdev *eventdev)
{
	if (eventdev == NULL)
		return;

	event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
			     eventdev);
}

static int
handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	uint8_t dev_id = 0;
	int ndev = rte_event_dev_count();

	if (ndev < 1)
		return -1;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_ATTACHED)
			rte_tel_data_add_array_int(d, dev_id);
	}

	return 0;
}

static int
handle_port_list(const char *cmd __rte_unused,
		 const char *params,
		 struct rte_tel_data *d)
{
	int i;
	uint8_t dev_id;
	struct rte_eventdev *dev;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (i = 0; i < dev->data->nb_ports; i++)
		rte_tel_data_add_array_int(d, i);

	return 0;
}

static int
handle_queue_list(const char *cmd __rte_unused,
		  const char *params,
		  struct rte_tel_data *d)
{
	int i;
	uint8_t dev_id;
	struct rte_eventdev *dev;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (i = 0; i < dev->data->nb_queues; i++)
		rte_tel_data_add_array_int(d, i);

	return 0;
}

static int
handle_queue_links(const char *cmd __rte_unused,
		   const char *params,
		   struct rte_tel_data *d)
{
	int i, ret, port_id = 0;
	char *end_param;
	uint8_t dev_id;
	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_id = strtoul(p_param, &end_param, 10);
	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	for (i = 0; i < ret; i++) {
		char qid_name[32];

		snprintf(qid_name, 31, "qid_%u", queues[i]);
		rte_tel_data_add_dict_u64(d, qid_name, priorities[i]);
	}

	return 0;
}

static int
eventdev_build_telemetry_data(int dev_id,
			      enum rte_event_dev_xstats_mode mode,
			      int port_queue_id,
			      struct rte_tel_data *d)
{
	struct rte_event_dev_xstats_name *xstat_names;
	unsigned int *ids;
	uint64_t *values;
	int i, ret, num_xstats;

	num_xstats = rte_event_dev_xstats_names_get(dev_id,
						    mode,
						    port_queue_id,
						    NULL,
						    NULL,
						    0);

	if (num_xstats < 0)
		return -1;

	/* use one malloc for names */
	xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
			     * num_xstats);
	if (xstat_names == NULL)
		return -1;

	ids = malloc((sizeof(unsigned int)) * num_xstats);
	if (ids == NULL) {
		free(xstat_names);
		return -1;
	}

	values = malloc((sizeof(uint64_t)) * num_xstats);
	if (values == NULL) {
		free(xstat_names);
		free(ids);
		return -1;
	}

	ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
					     xstat_names, ids, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(xstat_names);
		free(ids);
		free(values);
		return -1;
	}

	ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
				       ids, values, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(xstat_names);
		free(ids);
		free(values);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
					  values[i]);

	free(xstat_names);
	free(ids);
	free(values);
	return 0;
}

static int
handle_dev_xstats(const char *cmd __rte_unused,
		  const char *params,
		  struct rte_tel_data *d)
{
	int dev_id;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	mode = RTE_EVENT_DEV_XSTATS_DEVICE;
	return eventdev_build_telemetry_data(dev_id, mode, 0, d);
}

static int
handle_port_xstats(const char *cmd __rte_unused,
		   const char *params,
		   struct rte_tel_data *d)
{
	int dev_id;
	int port_queue_id = 0;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	mode = RTE_EVENT_DEV_XSTATS_PORT;

	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_queue_id = strtoul(p_param, &end_param, 10);

	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
}

static int
handle_queue_xstats(const char *cmd __rte_unused,
		    const char *params,
		    struct rte_tel_data *d)
{
	int dev_id;
	int port_queue_id = 0;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	mode = RTE_EVENT_DEV_XSTATS_QUEUE;

	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_queue_id = strtoul(p_param, &end_param, 10);

	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
}

RTE_INIT(eventdev_init_telemetry)
{
	rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
			"Returns list of available eventdevs. Takes no parameters");
	rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
			"Returns list of available ports. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
			"Returns list of available queues. Parameter: DevID");

	rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
			"Returns stats for an eventdev. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
			"Returns stats for an eventdev port. Params: DevID,PortID");
	rte_telemetry_register_cmd("/eventdev/queue_xstats",
			handle_queue_xstats,
			"Returns stats for an eventdev queue. Params: DevID,QueueID");
	rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
			"Returns links for an eventdev port. Params: DevID,PortID");
}