/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_ethdev.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];

static struct rte_eventdev_global eventdev_globals = {
	.nb_devs		= 0
};

struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;
/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
	return rte_eventdev_globals->nb_devs;
}
int
rte_event_dev_get_dev_id(const char *name)
{
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
		if ((strcmp(rte_event_devices[i].data->name, name)
				== 0) &&
				(rte_event_devices[i].attached ==
						RTE_EVENTDEV_ATTACHED))
			return i;
	return -ENODEV;
}
int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	return dev->data->socket_id;
}
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

	dev_info->dev = dev->dev;
	return 0;
}
int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;
	*caps = 0;

	return dev->dev_ops->eth_rx_adapter_caps_get ?
				(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
						&rte_eth_devices[eth_port_id],
						caps)
				: 0;
}
int __rte_experimental
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
{
	struct rte_eventdev *dev;
	const struct rte_event_timer_adapter_ops *ops;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;
	*caps = 0;

	return dev->dev_ops->timer_adapter_caps_get ?
				(*dev->dev_ops->timer_adapter_caps_get)(dev,
									0,
									caps,
									&ops)
				: 0;
}
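/*
 * Internal helper for rte_event_dev_configure(): (re)sizes the array that
 * caches each queue's rte_event_queue_conf. Three cases are handled below:
 * first-time allocation, re-configuration (queues beyond the new count are
 * released and newly added slots zeroed), and teardown when nb_queues == 0.
 */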
static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	struct rte_event_queue_conf *queues_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->queues_cfg == NULL && nb_queues != 0) {
		/* Allocate memory to store queue configuration */
		dev->data->queues_cfg = rte_zmalloc_socket(
				"eventdev->data->queues_cfg",
				sizeof(dev->data->queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->queues_cfg == NULL) {
			dev->data->nb_queues = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
					" nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
	/* Re-configure */
	} else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release,
				-ENOTSUP);

		/* Release the queues beyond the new, smaller count */
		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		/* Re-allocate memory to store queue configuration */
		queues_cfg = dev->data->queues_cfg;
		queues_cfg = rte_realloc(queues_cfg,
				sizeof(queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (queues_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
						" nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
		dev->data->queues_cfg = queues_cfg;

		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			/* Zero the configuration of the newly added queues */
			memset(queues_cfg + old_nb_queues, 0,
				sizeof(queues_cfg[0]) * new_qs);
		}
	} else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release,
				-ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}
#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
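/*
 * Internal helper mirroring rte_event_dev_queue_config() for ports: it
 * (re)sizes the driver port pointers, the cached per-port configurations and
 * the flat links_map (one row of RTE_EVENT_MAX_QUEUES_PER_DEV entries per
 * port). Slots holding EVENT_QUEUE_SERVICE_PRIORITY_INVALID mark queues that
 * are not linked to the port.
 */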
static inline int
rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
	uint8_t old_nb_ports = dev->data->nb_ports;
	void **ports;
	uint16_t *links_map;
	struct rte_event_port_conf *ports_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->ports == NULL && nb_ports != 0) {
		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
				sizeof(dev->data->ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
					" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store port configurations */
		dev->data->ports_cfg =
			rte_zmalloc_socket("eventdev->ports_cfg",
			sizeof(dev->data->ports_cfg[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports_cfg == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
					" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store queue to port link connection */
		dev->data->links_map =
			rte_zmalloc_socket("eventdev->links_map",
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
					" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}
		/* Mark every queue/port slot as unlinked */
		for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
			dev->data->links_map[i] =
				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
	} else if (dev->data->ports != NULL && nb_ports != 0) { /* re-config */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release,
				-ENOTSUP);

		ports = dev->data->ports;
		ports_cfg = dev->data->ports_cfg;
		links_map = dev->data->links_map;

		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);

		/* Realloc memory for ports */
		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE);
		if (ports == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
						" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory for ports_cfg */
		ports_cfg = rte_realloc(ports_cfg,
			sizeof(ports_cfg[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE);
		if (ports_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
						" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory to store queue to port link connection */
		links_map = rte_realloc(links_map,
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE);
		if (links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
					" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		if (nb_ports > old_nb_ports) {
			uint8_t new_ps = nb_ports - old_nb_ports;
			unsigned int old_links_map_end =
				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
			unsigned int links_map_end =
				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

			memset(ports + old_nb_ports, 0,
				sizeof(ports[0]) * new_ps);
			memset(ports_cfg + old_nb_ports, 0,
				sizeof(ports_cfg[0]) * new_ps);
			for (i = old_links_map_end; i < links_map_end; i++)
				links_map[i] =
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
		}

		dev->data->ports = ports;
		dev->data->ports_cfg = ports_cfg;
		dev->data->links_map = links_map;
	} else if (dev->data->ports != NULL && nb_ports == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release,
				-ENOTSUP);

		ports = dev->data->ports;
		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);
	}

	dev->data->nb_ports = nb_ports;
	return 0;
}
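/*
 * A minimal configuration sketch from the application's point of view; the
 * device id and all values below are illustrative, not taken from this file:
 *
 *	struct rte_event_dev_config cfg = {
 *		.nb_event_queues = 2,
 *		.nb_event_ports = 2,
 *		.nb_events_limit = 4096,
 *		.nb_event_queue_flows = 1024,
 *		.nb_event_port_dequeue_depth = 16,
 *		.nb_event_port_enqueue_depth = 16,
 *	};
 *	int ret = rte_event_dev_configure(dev_id, &cfg);
 *
 * Queue/port setup, linking and rte_event_dev_start() follow a successful
 * configure; the checks below enforce the limits advertised by
 * rte_event_dev_info_get().
 */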
int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_dev_info info;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check dequeue_timeout_ns value is in limit */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns &&
		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				info.max_dequeue_timeout_ns)) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check nb_events_limit is in limit */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
		dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check nb_event_queues is in limit */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues) {
		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
		dev_id, dev_conf->nb_event_queues, info.max_event_queues);
		return -EINVAL;
	}

	/* Check nb_event_ports is in limit */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d",
		dev_id, dev_conf->nb_event_ports, info.max_event_ports);
		return -EINVAL;
	}

	/* Check nb_event_queue_flows is in limit */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
		dev_id, dev_conf->nb_event_queue_flows,
		info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check nb_event_port_dequeue_depth is in limit */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_dequeue_depth >
			info.max_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
		dev_id, dev_conf->nb_event_port_dequeue_depth,
		info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check nb_event_port_enqueue_depth is in limit */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_enqueue_depth >
			info.max_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
		dev_id, dev_conf->nb_event_port_enqueue_depth,
		info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		rte_event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		rte_event_dev_queue_config(dev, 0);
		rte_event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	return diag;
}
static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}
int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
	return 0;
}
static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
	    !(queue_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
	    ((queue_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
	     (queue_conf->schedule_type == RTE_SCHED_TYPE_ATOMIC)))
		return 1;
	else
		return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
	    !(queue_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
	    ((queue_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
	     (queue_conf->schedule_type == RTE_SCHED_TYPE_ORDERED)))
		return 1;
	else
		return 0;
}
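/*
 * A minimal sketch of configuring one atomic queue (queue id and values are
 * illustrative): fetch the driver defaults, override what the application
 * cares about, then apply:
 *
 *	struct rte_event_queue_conf qconf;
 *	rte_event_queue_default_conf_get(dev_id, 0, &qconf);
 *	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	qconf.nb_atomic_flows = 1024;
 *	rte_event_queue_setup(dev_id, 0, &qconf);
 *
 * Passing queue_conf == NULL instead applies the driver defaults directly.
 */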
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

	if (queue_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	dev->data->queues_cfg[queue_id] = *queue_conf;
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}
int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
	return 0;
}
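/*
 * Port setup follows the same default-then-override pattern as queues (ids
 * and values illustrative):
 *
 *	struct rte_event_port_conf pconf;
 *	rte_event_port_default_conf_get(dev_id, 0, &pconf);
 *	rte_event_port_setup(dev_id, 0, &pconf);
 *
 * A freshly set-up port has all queue links removed (see the unlink call at
 * the end of rte_event_port_setup()).
 */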
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
	    (port_conf && port_conf->new_event_threshold >
		dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		"dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
	    (port_conf && port_conf->dequeue_depth >
		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		"dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
	    (port_conf && port_conf->enqueue_depth >
		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		"dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (port_conf && port_conf->disable_implicit_release &&
	    !(dev->data->event_dev_cap &
	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
		RTE_EDEV_LOG_ERR(
		"dev%d port%d Implicit release disable not supported",
			dev_id, port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

	if (port_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_cfg[port_id] = *port_conf;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port(default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	if (diag < 0)
		return diag;

	return 0;
}
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	switch (attr_id) {
	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
		*attr_value = dev->data->nb_ports;
		break;
	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
		*attr_value = dev->data->nb_queues;
		break;
	case RTE_EVENT_DEV_ATTR_STARTED:
		*attr_value = dev->data->dev_started;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	switch (attr_id) {
	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_event_queue_conf *conf;
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	conf = &dev->data->queues_cfg[queue_id];

	switch (attr_id) {
	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->priority;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
		*attr_value = conf->nb_atomic_flows;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
		*attr_value = conf->nb_atomic_order_sequences;
		break;
	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
		*attr_value = conf->event_queue_cfg;
		break;
	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
			return -EOVERFLOW;
		*attr_value = conf->schedule_type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
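/*
 * Each port owns a row of RTE_EVENT_MAX_QUEUES_PER_DEV entries in
 * dev->data->links_map, indexed by queue id; an entry stores the link's
 * service priority or EVENT_QUEUE_SERVICE_PRIORITY_INVALID when unlinked.
 * A minimal link call (ids illustrative):
 *
 *	uint8_t q[] = { 0, 1 };
 *	uint8_t prio[] = { RTE_EVENT_DEV_PRIORITY_NORMAL,
 *			RTE_EVENT_DEV_PRIORITY_HIGHEST };
 *	int nb = rte_event_port_link(dev_id, port_id, q, prio, 2);
 *
 * NULL queues/priorities link every configured queue at normal priority.
 */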
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct rte_eventdev *dev;
	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_link == NULL) {
		RTE_PMD_DEBUG_TRACE("Function not supported\n");
		rte_errno = -ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = -EINVAL;
		return 0;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			queues_list[i] = i;

		queues = queues_list;
		nb_links = dev->data->nb_queues;
	}

	if (priorities == NULL) {
		for (i = 0; i < nb_links; i++)
			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

		priorities = priorities_list;
	}

	for (i = 0; i < nb_links; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = -EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
						queues, priorities, nb_links);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = (uint8_t)priorities[i];

	return diag;
}
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct rte_eventdev *dev;
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	int i, diag, j;
	uint16_t *links_map;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_unlink == NULL) {
		RTE_PMD_DEBUG_TRACE("Function not supported\n");
		rte_errno = -ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = -EINVAL;
		return 0;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);

	if (queues == NULL) {
		/* Unlink all queues currently linked to this port */
		j = 0;
		for (i = 0; i < dev->data->nb_queues; i++) {
			if (links_map[i] !=
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
				all_queues[j] = i;
				j++;
			}
		}
		queues = all_queues;
	} else {
		/* Stop at the first queue that is not actually linked */
		for (j = 0; j < nb_unlinks; j++) {
			if (links_map[queues[j]] ==
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
				break;
		}
	}

	nb_unlinks = j;
	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = -EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
					queues, nb_unlinks);

	if (diag < 0)
		return diag;

	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	return diag;
}
int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}
	return count;
}
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

	if (timeout_ticks == NULL)
		return -EINVAL;

	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}
int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (service_id == NULL)
		return -EINVAL;

	if (dev->data->service_inited)
		*service_id = dev->data->service_id;

	return dev->data->service_inited ? 0 : -ESRCH;
}
int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);

	(*dev->dev_ops->dump)(dev, f);
	return 0;
}
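/*
 * Extended statistics. The usual two-pass retrieval pattern (buffer sizes
 * illustrative): call rte_event_dev_xstats_names_get() with a NULL buffer to
 * learn the count, allocate that many name/id entries, then repeat the call
 * with real buffers:
 *
 *	int n = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 */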
static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
							queue_port_id,
							NULL, NULL, 0);
	return 0;
}
int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
							queue_port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* dev_id checked above */
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
				queue_port_id, xstats_names, ids, size);

	return -ENOTSUP;
}
/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
				ids, values, n);
	return -ENOTSUP;
}
uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		unsigned int *id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	unsigned int temp = -1;

	if (id != NULL)
		*id = (unsigned int)-1;
	else
		id = &temp; /* ensure driver never gets a NULL value */

	/* implemented by driver */
	if (dev->dev_ops->xstats_get_by_name != NULL)
		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
	return -ENOTSUP;
}
int rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_reset != NULL)
		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
							ids, nb_ids);
	return -ENOTSUP;
}
int rte_event_dev_selftest(uint8_t dev_id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->dev_selftest != NULL)
		return (*dev->dev_ops->dev_selftest)();
	return -ENOTSUP;
}
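/*
 * Device lifecycle: start only after configure and queue/port setup; stop
 * before close. Starting an already started device and stopping an already
 * stopped one are logged and otherwise harmless no-ops below.
 */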
int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}
int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
		eventdev_stop_flush_t callback, void *userdata)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	dev->dev_ops->dev_stop_flush = callback;
	dev->data->dev_stop_flush_arg = userdata;

	return 0;
}
void
rte_event_dev_stop(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}
int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	return (*dev->dev_ops->dev_close)(dev);
}
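/*
 * Each device's rte_eventdev_data lives in a named memzone so secondary
 * processes can attach to the same state: the primary process reserves and
 * zeroes the zone, secondaries only look it up by name.
 */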
static inline int
rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_eventdev_data));

	return 0;
}
static inline uint8_t
rte_eventdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_DETACHED)
			return dev_id;
	}
	return RTE_EVENT_MAX_DEVS;
}
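/*
 * PMD-facing helpers: rte_event_pmd_allocate() claims a free slot in
 * rte_event_devices[] and binds it to a shared data area;
 * rte_event_pmd_release() detaches the slot and, in the primary process,
 * frees the private data and the backing memzone.
 */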
struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;
	uint8_t dev_id;

	if (rte_event_pmd_get_named_dev(name) != NULL) {
		RTE_EDEV_LOG_ERR("Event device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_eventdev_find_free_device_index();
	if (dev_id == RTE_EVENT_MAX_DEVS) {
		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
		return NULL;
	}

	eventdev = &rte_eventdevs[dev_id];

	if (eventdev->data == NULL) {
		struct rte_eventdev_data *eventdev_data = NULL;

		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
				socket_id);

		if (retval < 0 || eventdev_data == NULL)
			return NULL;

		eventdev->data = eventdev_data;

		snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
				"%s", name);

		eventdev->data->dev_id = dev_id;
		eventdev->data->socket_id = socket_id;
		eventdev->data->dev_started = 0;

		eventdev->attached = RTE_EVENTDEV_ATTACHED;

		eventdev_globals.nb_devs++;
	}

	return eventdev;
}
int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
				eventdev->data->dev_id);
		if (ret >= (int)sizeof(mz_name))
			return -EINVAL;

		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL)
			return -ENOMEM;

		ret = rte_memzone_free(mz);
		if (ret)
			return ret;
	}

	eventdev->data = NULL;
	return 0;
}