1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Cavium, Inc
13 #include <sys/types.h>
14 #include <sys/queue.h>
16 #include <rte_string_fns.h>
17 #include <rte_byteorder.h>
19 #include <rte_debug.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
25 #include <rte_per_lcore.h>
26 #include <rte_lcore.h>
27 #include <rte_atomic.h>
28 #include <rte_branch_prediction.h>
29 #include <rte_common.h>
30 #include <rte_malloc.h>
31 #include <rte_errno.h>
32 #include <ethdev_driver.h>
33 #include <rte_cryptodev.h>
34 #include <cryptodev_pmd.h>
35 #include <rte_telemetry.h>
37 #include "rte_eventdev.h"
38 #include "eventdev_pmd.h"
39 #include "eventdev_trace.h"
41 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
43 struct rte_eventdev *rte_eventdevs = rte_event_devices;
45 static struct rte_eventdev_global eventdev_globals = {
49 /* Public fastpath APIs. */
50 struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
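/*
 * Fast-path dispatch table: one rte_event_fp_ops entry per device, indexed by
 * dev_id. The inline enqueue/dequeue wrappers read this array instead of
 * rte_eventdevs[]. Entries are reset (event_dev_fp_ops_reset) whenever a
 * device is reconfigured, stopped, closed or released, and are populated
 * (event_dev_fp_ops_set) when the device is started or its probing finishes.
 */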
52 /* Event dev northbound API implementation */
55 rte_event_dev_count(void)
57 return eventdev_globals.nb_devs;
61 rte_event_dev_get_dev_id(const char *name)
69 for (i = 0; i < eventdev_globals.nb_devs; i++) {
70 cmp = (strncmp(rte_event_devices[i].data->name, name,
71 RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
72 (rte_event_devices[i].dev ? (strncmp(
73 rte_event_devices[i].dev->driver->name, name,
74 RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
75 if (cmp && (rte_event_devices[i].attached ==
76 RTE_EVENTDEV_ATTACHED))
83 rte_event_dev_socket_id(uint8_t dev_id)
85 struct rte_eventdev *dev;
87 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
88 dev = &rte_eventdevs[dev_id];
90 return dev->data->socket_id;
94 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
96 struct rte_eventdev *dev;
98 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
99 dev = &rte_eventdevs[dev_id];
101 if (dev_info == NULL)
104 memset(dev_info, 0, sizeof(struct rte_event_dev_info));
106 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
107 (*dev->dev_ops->dev_infos_get)(dev, dev_info);
109 dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
111 dev_info->dev = dev->dev;
116 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
119 struct rte_eventdev *dev;
121 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
122 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
124 dev = &rte_eventdevs[dev_id];
129 if (dev->dev_ops->eth_rx_adapter_caps_get == NULL)
130 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
134 return dev->dev_ops->eth_rx_adapter_caps_get ?
135 (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
136 &rte_eth_devices[eth_port_id],
142 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
144 struct rte_eventdev *dev;
145 const struct event_timer_adapter_ops *ops;
147 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
149 dev = &rte_eventdevs[dev_id];
155 return dev->dev_ops->timer_adapter_caps_get ?
156 (*dev->dev_ops->timer_adapter_caps_get)(dev,
164 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
167 struct rte_eventdev *dev;
168 struct rte_cryptodev *cdev;
170 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
171 if (!rte_cryptodev_is_valid_dev(cdev_id))
174 dev = &rte_eventdevs[dev_id];
175 cdev = rte_cryptodev_pmd_get_dev(cdev_id);
180 if (dev->dev_ops->crypto_adapter_caps_get == NULL)
181 *caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
185 return dev->dev_ops->crypto_adapter_caps_get ?
186 (*dev->dev_ops->crypto_adapter_caps_get)
187 (dev, cdev, caps) : 0;
191 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
194 struct rte_eventdev *dev;
195 struct rte_eth_dev *eth_dev;
197 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
198 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
200 dev = &rte_eventdevs[dev_id];
201 eth_dev = &rte_eth_devices[eth_port_id];
206 if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
207 *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
211 return dev->dev_ops->eth_tx_adapter_caps_get ?
212 (*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
219 event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
221 uint8_t old_nb_queues = dev->data->nb_queues;
222 struct rte_event_queue_conf *queues_cfg;
225 RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
228 if (nb_queues != 0) {
229 queues_cfg = dev->data->queues_cfg;
230 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
232 for (i = nb_queues; i < old_nb_queues; i++)
233 (*dev->dev_ops->queue_release)(dev, i);
236 if (nb_queues > old_nb_queues) {
237 uint8_t new_qs = nb_queues - old_nb_queues;
239 memset(queues_cfg + old_nb_queues, 0,
240 sizeof(queues_cfg[0]) * new_qs);
243 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
245 for (i = nb_queues; i < old_nb_queues; i++)
246 (*dev->dev_ops->queue_release)(dev, i);
249 dev->data->nb_queues = nb_queues;
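/*
 * Sentinel stored in dev->data->links_map[] for queue slots that are not
 * linked to a port. event_dev_port_config() initializes new slots to this
 * value, rte_event_port_link() overwrites it with the link priority, and
 * rte_event_port_unlink()/rte_event_port_links_get() use it to recognize
 * unlinked queues.
 */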
253 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
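/*
 * Mirror of event_dev_queue_config() for ports: release ports beyond the new
 * count, zero the config of any newly added ports and mark their links_map
 * slots with EVENT_QUEUE_SERVICE_PRIORITY_INVALID; nb_ports == 0 releases
 * every port.
 */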
256 event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
258 uint8_t old_nb_ports = dev->data->nb_ports;
261 struct rte_event_port_conf *ports_cfg;
264 RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
267 if (nb_ports != 0) { /* re-config */
268 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
270 ports = dev->data->ports;
271 ports_cfg = dev->data->ports_cfg;
272 links_map = dev->data->links_map;
274 for (i = nb_ports; i < old_nb_ports; i++)
275 (*dev->dev_ops->port_release)(ports[i]);
277 if (nb_ports > old_nb_ports) {
278 uint8_t new_ps = nb_ports - old_nb_ports;
279 unsigned int old_links_map_end =
280 old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
281 unsigned int links_map_end =
282 nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
284 memset(ports + old_nb_ports, 0,
285 sizeof(ports[0]) * new_ps);
286 memset(ports_cfg + old_nb_ports, 0,
287 sizeof(ports_cfg[0]) * new_ps);
288 for (i = old_links_map_end; i < links_map_end; i++)
290 EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
293 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
295 ports = dev->data->ports;
296 for (i = nb_ports; i < old_nb_ports; i++) {
297 (*dev->dev_ops->port_release)(ports[i]);
302 dev->data->nb_ports = nb_ports;
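/*
 * Typical bring-up sequence, shown here only as an illustrative sketch
 * (identifiers and sizes are arbitrary, error handling is omitted):
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *	struct rte_event_queue_conf qcfg;
 *	struct rte_event_port_conf pcfg;
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	cfg.nb_event_queues = 1;
 *	cfg.nb_event_ports = 1;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	rte_event_dev_configure(dev_id, &cfg);
 *
 *	rte_event_queue_default_conf_get(dev_id, 0, &qcfg);
 *	rte_event_queue_setup(dev_id, 0, &qcfg);
 *	rte_event_port_default_conf_get(dev_id, 0, &pcfg);
 *	rte_event_port_setup(dev_id, 0, &pcfg);
 *	rte_event_port_link(dev_id, 0, NULL, NULL, 0);
 *	rte_event_dev_start(dev_id);
 *
 * rte_event_dev_configure() itself only validates the request against the
 * limits reported by dev_infos_get and resizes the queue/port arrays before
 * handing control to the driver's dev_configure callback.
 */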
307 rte_event_dev_configure(uint8_t dev_id,
308 const struct rte_event_dev_config *dev_conf)
310 struct rte_event_dev_info info;
311 struct rte_eventdev *dev;
314 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
315 dev = &rte_eventdevs[dev_id];
317 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
318 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
320 if (dev->data->dev_started) {
322 "device %d must be stopped to allow configuration", dev_id);
326 if (dev_conf == NULL)
329 (*dev->dev_ops->dev_infos_get)(dev, &info);
331 /* Check that dequeue_timeout_ns is within the supported range */
332 if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
333 if (dev_conf->dequeue_timeout_ns &&
334 (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
335 || dev_conf->dequeue_timeout_ns >
336 info.max_dequeue_timeout_ns)) {
337 RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
338 " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
339 dev_id, dev_conf->dequeue_timeout_ns,
340 info.min_dequeue_timeout_ns,
341 info.max_dequeue_timeout_ns);
346 /* Check that nb_events_limit is within the device limit */
347 if (dev_conf->nb_events_limit > info.max_num_events) {
348 RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
349 dev_id, dev_conf->nb_events_limit, info.max_num_events);
353 /* Check that nb_event_queues is within the device limit */
354 if (!dev_conf->nb_event_queues) {
355 RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
359 if (dev_conf->nb_event_queues > info.max_event_queues +
360 info.max_single_link_event_port_queue_pairs) {
361 RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
362 dev_id, dev_conf->nb_event_queues,
363 info.max_event_queues,
364 info.max_single_link_event_port_queue_pairs);
367 if (dev_conf->nb_event_queues -
368 dev_conf->nb_single_link_event_port_queues >
369 info.max_event_queues) {
370 RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
371 dev_id, dev_conf->nb_event_queues,
372 dev_conf->nb_single_link_event_port_queues,
373 info.max_event_queues);
376 if (dev_conf->nb_single_link_event_port_queues >
377 dev_conf->nb_event_queues) {
378 RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
380 dev_conf->nb_single_link_event_port_queues,
381 dev_conf->nb_event_queues);
385 /* Check that nb_event_ports is within the device limit */
386 if (!dev_conf->nb_event_ports) {
387 RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
390 if (dev_conf->nb_event_ports > info.max_event_ports +
391 info.max_single_link_event_port_queue_pairs) {
392 RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
393 dev_id, dev_conf->nb_event_ports,
394 info.max_event_ports,
395 info.max_single_link_event_port_queue_pairs);
398 if (dev_conf->nb_event_ports -
399 dev_conf->nb_single_link_event_port_queues
400 > info.max_event_ports) {
401 RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
402 dev_id, dev_conf->nb_event_ports,
403 dev_conf->nb_single_link_event_port_queues,
404 info.max_event_ports);
408 if (dev_conf->nb_single_link_event_port_queues >
409 dev_conf->nb_event_ports) {
411 "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
413 dev_conf->nb_single_link_event_port_queues,
414 dev_conf->nb_event_ports);
418 /* Check that nb_event_queue_flows is within the device limit */
419 if (!dev_conf->nb_event_queue_flows) {
420 RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
423 if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
424 RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
425 dev_id, dev_conf->nb_event_queue_flows,
426 info.max_event_queue_flows);
430 /* Check that nb_event_port_dequeue_depth is within the device limit */
431 if (!dev_conf->nb_event_port_dequeue_depth) {
432 RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
436 if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
437 (dev_conf->nb_event_port_dequeue_depth >
438 info.max_event_port_dequeue_depth)) {
439 RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
440 dev_id, dev_conf->nb_event_port_dequeue_depth,
441 info.max_event_port_dequeue_depth);
445 /* Check that nb_event_port_enqueue_depth is within the device limit */
446 if (!dev_conf->nb_event_port_enqueue_depth) {
447 RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
451 if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
452 (dev_conf->nb_event_port_enqueue_depth >
453 info.max_event_port_enqueue_depth)) {
454 RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
455 dev_id, dev_conf->nb_event_port_enqueue_depth,
456 info.max_event_port_enqueue_depth);
460 /* Copy the dev_conf parameter into the dev structure */
461 memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
463 /* Setup new number of queues and reconfigure device. */
464 diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
466 RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
471 /* Setup new number of ports and reconfigure device. */
472 diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
474 event_dev_queue_config(dev, 0);
475 RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
480 event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
482 /* Configure the device */
483 diag = (*dev->dev_ops->dev_configure)(dev);
485 RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
486 event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
487 event_dev_queue_config(dev, 0);
488 event_dev_port_config(dev, 0);
491 dev->data->event_dev_cap = info.event_dev_cap;
492 rte_eventdev_trace_configure(dev_id, dev_conf, diag);
497 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
499 if (queue_id < dev->data->nb_queues && queue_id <
500 RTE_EVENT_MAX_QUEUES_PER_DEV)
507 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
508 struct rte_event_queue_conf *queue_conf)
510 struct rte_eventdev *dev;
512 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
513 dev = &rte_eventdevs[dev_id];
515 if (queue_conf == NULL)
518 if (!is_valid_queue(dev, queue_id)) {
519 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
523 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
524 memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
525 (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
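/*
 * The two predicates below classify a queue configuration for the flow/order
 * limit checks in rte_event_queue_setup(): a queue counts as atomic (resp.
 * ordered) when it is not a SINGLE_LINK queue and either carries the
 * ALL_TYPES flag or is explicitly scheduled as RTE_SCHED_TYPE_ATOMIC
 * (resp. RTE_SCHED_TYPE_ORDERED).
 */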
530 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
533 !(queue_conf->event_queue_cfg &
534 RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
535 ((queue_conf->event_queue_cfg &
536 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
537 (queue_conf->schedule_type
538 == RTE_SCHED_TYPE_ATOMIC)
546 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
549 !(queue_conf->event_queue_cfg &
550 RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
551 ((queue_conf->event_queue_cfg &
552 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
553 (queue_conf->schedule_type
554 == RTE_SCHED_TYPE_ORDERED)
563 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
564 const struct rte_event_queue_conf *queue_conf)
566 struct rte_eventdev *dev;
567 struct rte_event_queue_conf def_conf;
569 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
570 dev = &rte_eventdevs[dev_id];
572 if (!is_valid_queue(dev, queue_id)) {
573 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
577 /* Check nb_atomic_flows limit */
578 if (is_valid_atomic_queue_conf(queue_conf)) {
579 if (queue_conf->nb_atomic_flows == 0 ||
580 queue_conf->nb_atomic_flows >
581 dev->data->dev_conf.nb_event_queue_flows) {
583 "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
584 dev_id, queue_id, queue_conf->nb_atomic_flows,
585 dev->data->dev_conf.nb_event_queue_flows);
590 /* Check nb_atomic_order_sequences limit */
591 if (is_valid_ordered_queue_conf(queue_conf)) {
592 if (queue_conf->nb_atomic_order_sequences == 0 ||
593 queue_conf->nb_atomic_order_sequences >
594 dev->data->dev_conf.nb_event_queue_flows) {
596 "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
597 dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
598 dev->data->dev_conf.nb_event_queue_flows);
603 if (dev->data->dev_started) {
605 "device %d must be stopped to allow queue setup", dev_id);
609 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
611 if (queue_conf == NULL) {
612 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
614 (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
615 queue_conf = &def_conf;
618 dev->data->queues_cfg[queue_id] = *queue_conf;
619 rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
620 return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
624 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
626 if (port_id < dev->data->nb_ports)
633 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
634 struct rte_event_port_conf *port_conf)
636 struct rte_eventdev *dev;
638 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
639 dev = &rte_eventdevs[dev_id];
641 if (port_conf == NULL)
644 if (!is_valid_port(dev, port_id)) {
645 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
649 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
650 memset(port_conf, 0, sizeof(struct rte_event_port_conf));
651 (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
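/*
 * rte_event_port_setup() checks new_event_threshold and the enqueue/dequeue
 * depths against the limits fixed at rte_event_dev_configure() time, rejects
 * RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL on devices without the
 * IMPLICIT_RELEASE_DISABLE capability, falls back to the driver's default
 * port config when port_conf is NULL, and leaves the new port unlinked.
 */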
656 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
657 const struct rte_event_port_conf *port_conf)
659 struct rte_eventdev *dev;
660 struct rte_event_port_conf def_conf;
663 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
664 dev = &rte_eventdevs[dev_id];
666 if (!is_valid_port(dev, port_id)) {
667 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
671 /* Check new_event_threshold limit */
672 if ((port_conf && !port_conf->new_event_threshold) ||
673 (port_conf && port_conf->new_event_threshold >
674 dev->data->dev_conf.nb_events_limit)) {
676 "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
677 dev_id, port_id, port_conf->new_event_threshold,
678 dev->data->dev_conf.nb_events_limit);
682 /* Check dequeue_depth limit */
683 if ((port_conf && !port_conf->dequeue_depth) ||
684 (port_conf && port_conf->dequeue_depth >
685 dev->data->dev_conf.nb_event_port_dequeue_depth)) {
687 "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
688 dev_id, port_id, port_conf->dequeue_depth,
689 dev->data->dev_conf.nb_event_port_dequeue_depth);
693 /* Check enqueue_depth limit */
694 if ((port_conf && !port_conf->enqueue_depth) ||
695 (port_conf && port_conf->enqueue_depth >
696 dev->data->dev_conf.nb_event_port_enqueue_depth)) {
698 "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
699 dev_id, port_id, port_conf->enqueue_depth,
700 dev->data->dev_conf.nb_event_port_enqueue_depth);
705 (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
706 !(dev->data->event_dev_cap &
707 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
709 "dev%d port%d Implicit release disable not supported",
714 if (dev->data->dev_started) {
716 "device %d must be stopped to allow port setup", dev_id);
720 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
722 if (port_conf == NULL) {
723 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
725 (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
726 port_conf = &def_conf;
729 dev->data->ports_cfg[port_id] = *port_conf;
731 diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
733 /* Unlink all the queues from this port (default state after setup) */
735 diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
737 rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
745 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
746 uint32_t *attr_value)
748 struct rte_eventdev *dev;
752 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
753 dev = &rte_eventdevs[dev_id];
756 case RTE_EVENT_DEV_ATTR_PORT_COUNT:
757 *attr_value = dev->data->nb_ports;
759 case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
760 *attr_value = dev->data->nb_queues;
762 case RTE_EVENT_DEV_ATTR_STARTED:
763 *attr_value = dev->data->dev_started;
773 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
774 uint32_t *attr_value)
776 struct rte_eventdev *dev;
781 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
782 dev = &rte_eventdevs[dev_id];
783 if (!is_valid_port(dev, port_id)) {
784 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
789 case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
790 *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
792 case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
793 *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
795 case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
796 *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
798 case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
802 config = dev->data->ports_cfg[port_id].event_port_cfg;
803 *attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
813 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
814 uint32_t *attr_value)
816 struct rte_event_queue_conf *conf;
817 struct rte_eventdev *dev;
822 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
823 dev = &rte_eventdevs[dev_id];
824 if (!is_valid_queue(dev, queue_id)) {
825 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
829 conf = &dev->data->queues_cfg[queue_id];
832 case RTE_EVENT_QUEUE_ATTR_PRIORITY:
833 *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
834 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
835 *attr_value = conf->priority;
837 case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
838 *attr_value = conf->nb_atomic_flows;
840 case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
841 *attr_value = conf->nb_atomic_order_sequences;
843 case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
844 *attr_value = conf->event_queue_cfg;
846 case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
847 if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
850 *attr_value = conf->schedule_type;
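/*
 * rte_event_port_link(): queues == NULL links the port to every configured
 * queue and priorities == NULL applies RTE_EVENT_DEV_PRIORITY_NORMAL, so an
 * illustrative "link everything" call is simply
 *
 *	rte_event_port_link(dev_id, port_id, NULL, NULL, 0);
 *
 * The return value is the number of queues actually linked; links_map is
 * updated only for those entries.
 */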
859 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
860 const uint8_t queues[], const uint8_t priorities[],
863 struct rte_eventdev *dev;
864 uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
865 uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
869 RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
870 dev = &rte_eventdevs[dev_id];
872 if (*dev->dev_ops->port_link == NULL) {
873 RTE_EDEV_LOG_ERR("Function not supported\n");
878 if (!is_valid_port(dev, port_id)) {
879 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
884 if (queues == NULL) {
885 for (i = 0; i < dev->data->nb_queues; i++)
888 queues = queues_list;
889 nb_links = dev->data->nb_queues;
892 if (priorities == NULL) {
893 for (i = 0; i < nb_links; i++)
894 priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
896 priorities = priorities_list;
899 for (i = 0; i < nb_links; i++)
900 if (queues[i] >= dev->data->nb_queues) {
905 diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
906 queues, priorities, nb_links);
910 links_map = dev->data->links_map;
911 /* Point links_map to this port's entries */
912 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
913 for (i = 0; i < diag; i++)
914 links_map[queues[i]] = (uint8_t)priorities[i];
916 rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
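/*
 * rte_event_port_unlink() is the inverse operation: queues == NULL unlinks
 * every queue currently mapped to the port, and on success the matching
 * links_map entries are reset to EVENT_QUEUE_SERVICE_PRIORITY_INVALID.
 * Drivers that complete unlinks asynchronously expose the outstanding work
 * through rte_event_port_unlinks_in_progress() further down.
 */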
921 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
922 uint8_t queues[], uint16_t nb_unlinks)
924 struct rte_eventdev *dev;
925 uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
929 RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
930 dev = &rte_eventdevs[dev_id];
932 if (*dev->dev_ops->port_unlink == NULL) {
933 RTE_EDEV_LOG_ERR("Function not supported");
938 if (!is_valid_port(dev, port_id)) {
939 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
944 links_map = dev->data->links_map;
945 /* Point links_map to this port's entries */
946 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
948 if (queues == NULL) {
950 for (i = 0; i < dev->data->nb_queues; i++) {
952 EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
959 for (j = 0; j < nb_unlinks; j++) {
960 if (links_map[queues[j]] ==
961 EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
967 for (i = 0; i < nb_unlinks; i++)
968 if (queues[i] >= dev->data->nb_queues) {
973 diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
979 for (i = 0; i < diag; i++)
980 links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
982 rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
987 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
989 struct rte_eventdev *dev;
991 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
992 dev = &rte_eventdevs[dev_id];
993 if (!is_valid_port(dev, port_id)) {
994 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
998 /* Return 0 if the PMD does not implement unlinks in progress.
999 * This allows PMDs which handle unlink synchronously to not implement
1000 * this function at all.
1002 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);
1004 return (*dev->dev_ops->port_unlinks_in_progress)(dev,
1005 dev->data->ports[port_id]);
1009 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1010 uint8_t queues[], uint8_t priorities[])
1012 struct rte_eventdev *dev;
1013 uint16_t *links_map;
1016 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1017 dev = &rte_eventdevs[dev_id];
1018 if (!is_valid_port(dev, port_id)) {
1019 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1023 links_map = dev->data->links_map;
1024 /* Point links_map to this port's entries */
1025 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1026 for (i = 0; i < dev->data->nb_queues; i++) {
1027 if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1029 priorities[count] = (uint8_t)links_map[i];
1037 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1038 uint64_t *timeout_ticks)
1040 struct rte_eventdev *dev;
1042 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1043 dev = &rte_eventdevs[dev_id];
1044 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
1046 if (timeout_ticks == NULL)
1049 return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
1053 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
1055 struct rte_eventdev *dev;
1057 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1058 dev = &rte_eventdevs[dev_id];
1060 if (service_id == NULL)
1063 if (dev->data->service_inited)
1064 *service_id = dev->data->service_id;
1066 return dev->data->service_inited ? 0 : -ESRCH;
1070 rte_event_dev_dump(uint8_t dev_id, FILE *f)
1072 struct rte_eventdev *dev;
1074 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1075 dev = &rte_eventdevs[dev_id];
1076 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
1080 (*dev->dev_ops->dump)(dev, f);
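/*
 * Extended-statistics query convention: when xstats_names is NULL or the
 * supplied size is too small, rte_event_dev_xstats_names_get() returns the
 * number of available statistics (xstats_get_count() asks the driver for
 * that count), so callers size their buffers with a first call and fetch
 * names/ids with a second one.
 */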
1086 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1087 uint8_t queue_port_id)
1089 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1090 if (dev->dev_ops->xstats_get_names != NULL)
1091 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1098 rte_event_dev_xstats_names_get(uint8_t dev_id,
1099 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
1100 struct rte_event_dev_xstats_name *xstats_names,
1101 unsigned int *ids, unsigned int size)
1103 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1104 const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1106 if (xstats_names == NULL || cnt_expected_entries < 0 ||
1107 (int)size < cnt_expected_entries)
1108 return cnt_expected_entries;
1110 /* dev_id checked above */
1111 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1113 if (dev->dev_ops->xstats_get_names != NULL)
1114 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1115 queue_port_id, xstats_names, ids, size);
1120 /* retrieve eventdev extended statistics */
1122 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1123 uint8_t queue_port_id, const unsigned int ids[],
1124 uint64_t values[], unsigned int n)
1126 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1127 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1129 /* implemented by the driver */
1130 if (dev->dev_ops->xstats_get != NULL)
1131 return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1137 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1140 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1141 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1142 unsigned int temp = -1;
1145 *id = (unsigned int)-1;
1147 id = &temp; /* ensure driver never gets a NULL value */
1149 /* implemented by driver */
1150 if (dev->dev_ops->xstats_get_by_name != NULL)
1151 return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
1155 int rte_event_dev_xstats_reset(uint8_t dev_id,
1156 enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1157 const uint32_t ids[], uint32_t nb_ids)
1159 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1160 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1162 if (dev->dev_ops->xstats_reset != NULL)
1163 return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1168 int rte_event_pmd_selftest_seqn_dynfield_offset = -1;
1170 int rte_event_dev_selftest(uint8_t dev_id)
1172 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1173 static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
1174 .name = "rte_event_pmd_selftest_seqn_dynfield",
1175 .size = sizeof(rte_event_pmd_selftest_seqn_t),
1176 .align = __alignof__(rte_event_pmd_selftest_seqn_t),
1178 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1180 if (dev->dev_ops->dev_selftest != NULL) {
1181 rte_event_pmd_selftest_seqn_dynfield_offset =
1182 rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
1183 if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
1185 return (*dev->dev_ops->dev_selftest)();
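/*
 * Event vector pools: each mempool element is a struct rte_event_vector
 * header followed by nb_elem object pointers, and the mempool ops are chosen
 * with rte_mbuf_best_mempool_ops(), as for mbuf pools. An illustrative call
 * (sizes are arbitrary) could be:
 *
 *	rte_event_vector_pool_create("ev_vec_pool", 16384, 256, 64,
 *				     rte_socket_id());
 */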
1190 struct rte_mempool *
1191 rte_event_vector_pool_create(const char *name, unsigned int n,
1192 unsigned int cache_size, uint16_t nb_elem,
1195 const char *mp_ops_name;
1196 struct rte_mempool *mp;
1197 unsigned int elt_sz;
1201 RTE_LOG(ERR, EVENTDEV,
1202 "Invalid number of elements=%d requested\n", nb_elem);
1208 sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
1209 mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
1214 mp_ops_name = rte_mbuf_best_mempool_ops();
1215 ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
1217 RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
1221 ret = rte_mempool_populate_default(mp);
1227 rte_mempool_free(mp);
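/*
 * Start/stop lifecycle: rte_event_dev_start() publishes the device's
 * fast-path ops only after the driver's dev_start callback succeeds, while
 * rte_event_dev_stop() clears dev_started first, invokes the driver stop
 * (which may use the stop-flush callback registered below) and then resets
 * the fast-path ops so that stale enqueue/dequeue calls cannot reach a
 * stopped device.
 */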
1233 rte_event_dev_start(uint8_t dev_id)
1235 struct rte_eventdev *dev;
1238 RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1240 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1241 dev = &rte_eventdevs[dev_id];
1242 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1244 if (dev->data->dev_started != 0) {
1245 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already started",
1250 diag = (*dev->dev_ops->dev_start)(dev);
1251 rte_eventdev_trace_start(dev_id, diag);
1253 dev->data->dev_started = 1;
1257 event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);
1263 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1264 eventdev_stop_flush_t callback, void *userdata)
1266 struct rte_eventdev *dev;
1268 RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
1270 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1271 dev = &rte_eventdevs[dev_id];
1273 dev->dev_ops->dev_stop_flush = callback;
1274 dev->data->dev_stop_flush_arg = userdata;
1280 rte_event_dev_stop(uint8_t dev_id)
1282 struct rte_eventdev *dev;
1284 RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1286 RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1287 dev = &rte_eventdevs[dev_id];
1288 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1290 if (dev->data->dev_started == 0) {
1291 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already stopped",
1296 dev->data->dev_started = 0;
1297 (*dev->dev_ops->dev_stop)(dev);
1298 rte_eventdev_trace_stop(dev_id);
1299 event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
1303 rte_event_dev_close(uint8_t dev_id)
1305 struct rte_eventdev *dev;
1307 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1308 dev = &rte_eventdevs[dev_id];
1309 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1311 /* Device must be stopped before it can be closed */
1312 if (dev->data->dev_started == 1) {
1313 RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1318 event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
1319 rte_eventdev_trace_close(dev_id);
1320 return (*dev->dev_ops->dev_close)(dev);
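/*
 * Per-device data lives in a memzone named "rte_eventdev_data_<dev_id>" so
 * that secondary processes can attach to the same state: the primary process
 * reserves and zeroes the zone (initializing every links_map slot to the
 * invalid sentinel) while secondaries merely look it up.
 */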
1324 eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1327 char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1328 const struct rte_memzone *mz;
1331 /* Generate memzone name */
1332 n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1333 if (n >= (int)sizeof(mz_name))
1336 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1337 mz = rte_memzone_reserve(mz_name,
1338 sizeof(struct rte_eventdev_data),
1341 mz = rte_memzone_lookup(mz_name);
1347 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1348 memset(*data, 0, sizeof(struct rte_eventdev_data));
1349 for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
1350 RTE_EVENT_MAX_QUEUES_PER_DEV;
1352 (*data)->links_map[n] =
1353 EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1359 static inline uint8_t
1360 eventdev_find_free_device_index(void)
1364 for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1365 if (rte_eventdevs[dev_id].attached ==
1366 RTE_EVENTDEV_DETACHED)
1369 return RTE_EVENT_MAX_DEVS;
1372 struct rte_eventdev *
1373 rte_event_pmd_allocate(const char *name, int socket_id)
1375 struct rte_eventdev *eventdev;
1378 if (rte_event_pmd_get_named_dev(name) != NULL) {
1379 RTE_EDEV_LOG_ERR("Event device with name %s already "
1380 "allocated!", name);
1384 dev_id = eventdev_find_free_device_index();
1385 if (dev_id == RTE_EVENT_MAX_DEVS) {
1386 RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1390 eventdev = &rte_eventdevs[dev_id];
1392 if (eventdev->data == NULL) {
1393 struct rte_eventdev_data *eventdev_data = NULL;
1396 eventdev_data_alloc(dev_id, &eventdev_data, socket_id);
1398 if (retval < 0 || eventdev_data == NULL)
1401 eventdev->data = eventdev_data;
1403 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1405 strlcpy(eventdev->data->name, name,
1406 RTE_EVENTDEV_NAME_MAX_LEN);
1408 eventdev->data->dev_id = dev_id;
1409 eventdev->data->socket_id = socket_id;
1410 eventdev->data->dev_started = 0;
1413 eventdev->attached = RTE_EVENTDEV_ATTACHED;
1414 eventdev_globals.nb_devs++;
1421 rte_event_pmd_release(struct rte_eventdev *eventdev)
1424 char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1425 const struct rte_memzone *mz;
1427 if (eventdev == NULL)
1430 event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
1431 eventdev->attached = RTE_EVENTDEV_DETACHED;
1432 eventdev_globals.nb_devs--;
1434 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1435 rte_free(eventdev->data->dev_private);
1437 /* Generate memzone name */
1438 ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1439 eventdev->data->dev_id);
1440 if (ret >= (int)sizeof(mz_name))
1443 mz = rte_memzone_lookup(mz_name);
1447 ret = rte_memzone_free(mz);
1452 eventdev->data = NULL;
1457 event_dev_probing_finish(struct rte_eventdev *eventdev)
1459 if (eventdev == NULL)
1462 event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
1467 handle_dev_list(const char *cmd __rte_unused,
1468 const char *params __rte_unused,
1469 struct rte_tel_data *d)
1472 int ndev = rte_event_dev_count();
1477 rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1478 for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1479 if (rte_eventdevs[dev_id].attached ==
1480 RTE_EVENTDEV_ATTACHED)
1481 rte_tel_data_add_array_int(d, dev_id);
1488 handle_port_list(const char *cmd __rte_unused,
1490 struct rte_tel_data *d)
1494 struct rte_eventdev *dev;
1497 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1500 dev_id = strtoul(params, &end_param, 10);
1501 if (*end_param != '\0')
1503 "Extra parameters passed to eventdev telemetry command, ignoring");
1505 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1506 dev = &rte_eventdevs[dev_id];
1508 rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1509 for (i = 0; i < dev->data->nb_ports; i++)
1510 rte_tel_data_add_array_int(d, i);
1516 handle_queue_list(const char *cmd __rte_unused,
1518 struct rte_tel_data *d)
1522 struct rte_eventdev *dev;
1525 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1528 dev_id = strtoul(params, &end_param, 10);
1529 if (*end_param != '\0')
1531 "Extra parameters passed to eventdev telemetry command, ignoring");
1533 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1534 dev = &rte_eventdevs[dev_id];
1536 rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1537 for (i = 0; i < dev->data->nb_queues; i++)
1538 rte_tel_data_add_array_int(d, i);
1544 handle_queue_links(const char *cmd __rte_unused,
1546 struct rte_tel_data *d)
1548 int i, ret, port_id = 0;
1551 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1552 uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
1553 const char *p_param;
1555 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1558 /* Get dev ID from parameter string */
1559 dev_id = strtoul(params, &end_param, 10);
1560 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1562 p_param = strtok(end_param, ",");
1563 if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1566 port_id = strtoul(p_param, &end_param, 10);
1567 p_param = strtok(NULL, "\0");
1568 if (p_param != NULL)
1570 "Extra parameters passed to eventdev telemetry command, ignoring");
1572 ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
1576 rte_tel_data_start_dict(d);
1577 for (i = 0; i < ret; i++) {
1580 snprintf(qid_name, sizeof(qid_name), "qid_%u", queues[i]);
1581 rte_tel_data_add_dict_u64(d, qid_name, priorities[i]);
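/*
 * Shared helper for the *_xstats telemetry endpoints: size the result with a
 * first rte_event_dev_xstats_names_get() call, allocate the names/ids/values
 * arrays, fetch names and values, and emit them as a name -> value dict.
 */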
1588 eventdev_build_telemetry_data(int dev_id,
1589 enum rte_event_dev_xstats_mode mode,
1591 struct rte_tel_data *d)
1593 struct rte_event_dev_xstats_name *xstat_names;
1596 int i, ret, num_xstats;
1598 num_xstats = rte_event_dev_xstats_names_get(dev_id,
1608 /* use one malloc for names */
1609 xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
1611 if (xstat_names == NULL)
1614 ids = malloc((sizeof(unsigned int)) * num_xstats);
1620 values = malloc((sizeof(uint64_t)) * num_xstats);
1621 if (values == NULL) {
1627 ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
1628 xstat_names, ids, num_xstats);
1629 if (ret < 0 || ret > num_xstats) {
1636 ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
1637 ids, values, num_xstats);
1638 if (ret < 0 || ret > num_xstats) {
1645 rte_tel_data_start_dict(d);
1646 for (i = 0; i < num_xstats; i++)
1647 rte_tel_data_add_dict_u64(d, xstat_names[i].name,
1657 handle_dev_xstats(const char *cmd __rte_unused,
1659 struct rte_tel_data *d)
1662 enum rte_event_dev_xstats_mode mode;
1665 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1668 /* Get dev ID from parameter string */
1669 dev_id = strtoul(params, &end_param, 10);
1670 if (*end_param != '\0')
1672 "Extra parameters passed to eventdev telemetry command, ignoring");
1674 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1676 mode = RTE_EVENT_DEV_XSTATS_DEVICE;
1677 return eventdev_build_telemetry_data(dev_id, mode, 0, d);
1681 handle_port_xstats(const char *cmd __rte_unused,
1683 struct rte_tel_data *d)
1686 int port_queue_id = 0;
1687 enum rte_event_dev_xstats_mode mode;
1689 const char *p_param;
1691 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1694 /* Get dev ID from parameter string */
1695 dev_id = strtoul(params, &end_param, 10);
1696 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1698 p_param = strtok(end_param, ",");
1699 mode = RTE_EVENT_DEV_XSTATS_PORT;
1701 if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1704 port_queue_id = strtoul(p_param, &end_param, 10);
1706 p_param = strtok(NULL, "\0");
1707 if (p_param != NULL)
1709 "Extra parameters passed to eventdev telemetry command, ignoring");
1711 return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1715 handle_queue_xstats(const char *cmd __rte_unused,
1717 struct rte_tel_data *d)
1720 int port_queue_id = 0;
1721 enum rte_event_dev_xstats_mode mode;
1723 const char *p_param;
1725 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1728 /* Get dev ID from parameter string */
1729 dev_id = strtoul(params, &end_param, 10);
1730 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1732 p_param = strtok(end_param, ",");
1733 mode = RTE_EVENT_DEV_XSTATS_QUEUE;
1735 if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1738 port_queue_id = strtoul(p_param, &end_param, 10);
1740 p_param = strtok(NULL, "\0");
1741 if (p_param != NULL)
1743 "Extra parameters passed to eventdev telemetry command, ignoring");
1745 return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
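/*
 * The telemetry endpoints registered below can be exercised with
 * usertools/dpdk-telemetry.py; the parameter format matches the parsers
 * above (dev ID, optionally followed by a comma-separated port/queue ID).
 * An illustrative session (IDs are arbitrary):
 *
 *	--> /eventdev/dev_list
 *	--> /eventdev/port_xstats,0,1
 *	--> /eventdev/queue_links,0,0
 */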
1748 RTE_INIT(eventdev_init_telemetry)
1750 rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
1751 "Returns list of available eventdevs. Takes no parameters");
1752 rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
1753 "Returns list of available ports. Parameter: DevID");
1754 rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
1755 "Returns list of available queues. Parameter: DevID");
1757 rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
1758 "Returns stats for an eventdev. Parameter: DevID");
1759 rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
1760 "Returns stats for an eventdev port. Params: DevID,PortID");
1761 rte_telemetry_register_cmd("/eventdev/queue_xstats",
1762 handle_queue_xstats,
1763 "Returns stats for an eventdev queue. Params: DevID,QueueID");
1764 rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
1765 "Returns links for an eventdev port. Params: DevID,QueueID");