1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Cavium, Inc
13 #include <sys/types.h>
14 #include <sys/queue.h>
16 #include <rte_byteorder.h>
18 #include <rte_debug.h>
20 #include <rte_memory.h>
21 #include <rte_memcpy.h>
22 #include <rte_memzone.h>
24 #include <rte_per_lcore.h>
25 #include <rte_lcore.h>
26 #include <rte_atomic.h>
27 #include <rte_branch_prediction.h>
28 #include <rte_common.h>
29 #include <rte_malloc.h>
30 #include <rte_errno.h>
31 #include <rte_ethdev.h>
32 #include <rte_cryptodev.h>
33 #include <rte_cryptodev_pmd.h>
35 #include "rte_eventdev.h"
36 #include "rte_eventdev_pmd.h"
38 struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
40 struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];
42 static struct rte_eventdev_global eventdev_globals = {
46 struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;
48 /* Event dev north bound API implementation */
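/*
 * A minimal application-side usage sketch of these north bound APIs
 * (illustrative only; device id 0 and the chosen configuration values are
 * assumptions, and all error checking is omitted):
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(0, &info);
 *	cfg.nb_event_queues = 1;
 *	cfg.nb_event_ports = 1;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	rte_event_dev_configure(0, &cfg);
 *	rte_event_queue_setup(0, 0, NULL);        // NULL => driver default conf
 *	rte_event_port_setup(0, 0, NULL);         // NULL => driver default conf
 *	rte_event_port_link(0, 0, NULL, NULL, 0); // NULL queues => link all
 *	rte_event_dev_start(0);
 */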
51 rte_event_dev_count(void)
53 return rte_eventdev_globals->nb_devs;
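/* Look up an attached event device by name and return its numeric device id. */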
57 rte_event_dev_get_dev_id(const char *name)
64 for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
65 if ((strcmp(rte_event_devices[i].data->name, name)
67 (rte_event_devices[i].attached ==
68 RTE_EVENTDEV_ATTACHED))
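/* Return the NUMA socket id the event device is associated with. */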
74 rte_event_dev_socket_id(uint8_t dev_id)
76 struct rte_eventdev *dev;
78 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
79 dev = &rte_eventdevs[dev_id];
81 return dev->data->socket_id;
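/* Query device capabilities from the driver and fill in *dev_info. */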
85 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
87 struct rte_eventdev *dev;
89 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
90 dev = &rte_eventdevs[dev_id];
95 memset(dev_info, 0, sizeof(struct rte_event_dev_info));
97 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
98 (*dev->dev_ops->dev_infos_get)(dev, dev_info);
100 dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
102 dev_info->dev = dev->dev;
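/* Retrieve the Rx adapter capabilities the driver reports for the given
 * (event device, ethdev port) pair.
 */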
107 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
110 struct rte_eventdev *dev;
112 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
113 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
115 dev = &rte_eventdevs[dev_id];
121 return dev->dev_ops->eth_rx_adapter_caps_get ?
122 (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
123 &rte_eth_devices[eth_port_id],
128 int __rte_experimental
129 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
131 struct rte_eventdev *dev;
132 const struct rte_event_timer_adapter_ops *ops;
134 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
136 dev = &rte_eventdevs[dev_id];
142 return dev->dev_ops->timer_adapter_caps_get ?
143 (*dev->dev_ops->timer_adapter_caps_get)(dev,
150 int __rte_experimental
151 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
154 struct rte_eventdev *dev;
155 struct rte_cryptodev *cdev;
157 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
158 if (!rte_cryptodev_pmd_is_valid_dev(cdev_id))
161 dev = &rte_eventdevs[dev_id];
162 cdev = rte_cryptodev_pmd_get_dev(cdev_id);
168 return dev->dev_ops->crypto_adapter_caps_get ?
169 (*dev->dev_ops->crypto_adapter_caps_get)
170 (dev, cdev, caps) : -ENOTSUP;
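/* (Re)allocate the per-queue configuration array to hold nb_queues entries,
 * releasing any queues that fall outside the new range.
 */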
174 rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
176 uint8_t old_nb_queues = dev->data->nb_queues;
177 struct rte_event_queue_conf *queues_cfg;
180 RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
183 /* First time configuration */
184 if (dev->data->queues_cfg == NULL && nb_queues != 0) {
185 /* Allocate memory to store queue configuration */
186 dev->data->queues_cfg = rte_zmalloc_socket(
187 "eventdev->data->queues_cfg",
188 sizeof(dev->data->queues_cfg[0]) * nb_queues,
189 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
190 if (dev->data->queues_cfg == NULL) {
191 dev->data->nb_queues = 0;
192 RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
193 "nb_queues %u", nb_queues);
197 } else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
198 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
200 for (i = nb_queues; i < old_nb_queues; i++)
201 (*dev->dev_ops->queue_release)(dev, i);
203 /* Reallocate memory to store queue configuration */
204 queues_cfg = dev->data->queues_cfg;
205 queues_cfg = rte_realloc(queues_cfg,
206 sizeof(queues_cfg[0]) * nb_queues,
207 RTE_CACHE_LINE_SIZE);
208 if (queues_cfg == NULL) {
209 RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
210 " nb_queues %u", nb_queues);
213 dev->data->queues_cfg = queues_cfg;
215 if (nb_queues > old_nb_queues) {
216 uint8_t new_qs = nb_queues - old_nb_queues;
218 memset(queues_cfg + old_nb_queues, 0,
219 sizeof(queues_cfg[0]) * new_qs);
221 } else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
222 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
224 for (i = nb_queues; i < old_nb_queues; i++)
225 (*dev->dev_ops->queue_release)(dev, i);
228 dev->data->nb_queues = nb_queues;
232 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
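/* (Re)allocate the port, port-configuration and queue-link arrays to hold
 * nb_ports entries, releasing any ports that fall outside the new range.
 */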
235 rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
237 uint8_t old_nb_ports = dev->data->nb_ports;
240 struct rte_event_port_conf *ports_cfg;
243 RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
246 /* First time configuration */
247 if (dev->data->ports == NULL && nb_ports != 0) {
248 dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
249 sizeof(dev->data->ports[0]) * nb_ports,
250 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
251 if (dev->data->ports == NULL) {
252 dev->data->nb_ports = 0;
253 RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
254 "nb_ports %u", nb_ports);
258 /* Allocate memory to store port configurations */
259 dev->data->ports_cfg =
260 rte_zmalloc_socket("eventdev->ports_cfg",
261 sizeof(dev->data->ports_cfg[0]) * nb_ports,
262 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
263 if (dev->data->ports_cfg == NULL) {
264 dev->data->nb_ports = 0;
265 RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
266 "nb_ports %u", nb_ports);
270 /* Allocate memory to store queue to port link connection */
271 dev->data->links_map =
272 rte_zmalloc_socket("eventdev->links_map",
273 sizeof(dev->data->links_map[0]) * nb_ports *
274 RTE_EVENT_MAX_QUEUES_PER_DEV,
275 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
276 if (dev->data->links_map == NULL) {
277 dev->data->nb_ports = 0;
278 RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
279 "nb_ports %u", nb_ports);
282 for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
283 dev->data->links_map[i] =
284 EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
285 } else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
286 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
288 ports = dev->data->ports;
289 ports_cfg = dev->data->ports_cfg;
290 links_map = dev->data->links_map;
292 for (i = nb_ports; i < old_nb_ports; i++)
293 (*dev->dev_ops->port_release)(ports[i]);
295 /* Realloc memory for ports */
296 ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
297 RTE_CACHE_LINE_SIZE);
299 RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
300 " nb_ports %u", nb_ports);
304 /* Realloc memory for ports_cfg */
305 ports_cfg = rte_realloc(ports_cfg,
306 sizeof(ports_cfg[0]) * nb_ports,
307 RTE_CACHE_LINE_SIZE);
308 if (ports_cfg == NULL) {
309 RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
310 " nb_ports %u", nb_ports);
314 /* Realloc memory to store queue to port link connection */
315 links_map = rte_realloc(links_map,
316 sizeof(dev->data->links_map[0]) * nb_ports *
317 RTE_EVENT_MAX_QUEUES_PER_DEV,
318 RTE_CACHE_LINE_SIZE);
319 if (links_map == NULL) {
320 dev->data->nb_ports = 0;
321 RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
322 "nb_ports %u", nb_ports);
326 if (nb_ports > old_nb_ports) {
327 uint8_t new_ps = nb_ports - old_nb_ports;
328 unsigned int old_links_map_end =
329 old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
330 unsigned int links_map_end =
331 nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
333 memset(ports + old_nb_ports, 0,
334 sizeof(ports[0]) * new_ps);
335 memset(ports_cfg + old_nb_ports, 0,
336 sizeof(ports_cfg[0]) * new_ps);
337 for (i = old_links_map_end; i < links_map_end; i++)
339 EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
342 dev->data->ports = ports;
343 dev->data->ports_cfg = ports_cfg;
344 dev->data->links_map = links_map;
345 } else if (dev->data->ports != NULL && nb_ports == 0) {
346 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
348 ports = dev->data->ports;
349 for (i = nb_ports; i < old_nb_ports; i++)
350 (*dev->dev_ops->port_release)(ports[i]);
353 dev->data->nb_ports = nb_ports;
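/* Validate dev_conf against the limits advertised by the driver, resize the
 * queue and port bookkeeping, and pass the configuration down to the driver.
 */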
358 rte_event_dev_configure(uint8_t dev_id,
359 const struct rte_event_dev_config *dev_conf)
361 struct rte_eventdev *dev;
362 struct rte_event_dev_info info;
365 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
366 dev = &rte_eventdevs[dev_id];
368 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
369 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
371 if (dev->data->dev_started) {
373 "device %d must be stopped to allow configuration", dev_id);
377 if (dev_conf == NULL)
380 (*dev->dev_ops->dev_infos_get)(dev, &info);
382 /* Check dequeue_timeout_ns value is in limit */
383 if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
384 if (dev_conf->dequeue_timeout_ns &&
385 (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
386 || dev_conf->dequeue_timeout_ns >
387 info.max_dequeue_timeout_ns)) {
388 RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
389 " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
390 dev_id, dev_conf->dequeue_timeout_ns,
391 info.min_dequeue_timeout_ns,
392 info.max_dequeue_timeout_ns);
397 /* Check nb_events_limit is in limit */
398 if (dev_conf->nb_events_limit > info.max_num_events) {
399 RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
400 dev_id, dev_conf->nb_events_limit, info.max_num_events);
404 /* Check nb_event_queues is in limit */
405 if (!dev_conf->nb_event_queues) {
406 RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
410 if (dev_conf->nb_event_queues > info.max_event_queues) {
411 RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
412 dev_id, dev_conf->nb_event_queues, info.max_event_queues);
416 /* Check nb_event_ports is in limit */
417 if (!dev_conf->nb_event_ports) {
418 RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
421 if (dev_conf->nb_event_ports > info.max_event_ports) {
422 RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports= %d",
423 dev_id, dev_conf->nb_event_ports, info.max_event_ports);
427 /* Check nb_event_queue_flows is in limit */
428 if (!dev_conf->nb_event_queue_flows) {
429 RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
432 if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
433 RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
434 dev_id, dev_conf->nb_event_queue_flows,
435 info.max_event_queue_flows);
439 /* Check nb_event_port_dequeue_depth is in limit */
440 if (!dev_conf->nb_event_port_dequeue_depth) {
441 RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
445 if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
446 (dev_conf->nb_event_port_dequeue_depth >
447 info.max_event_port_dequeue_depth)) {
448 RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
449 dev_id, dev_conf->nb_event_port_dequeue_depth,
450 info.max_event_port_dequeue_depth);
454 /* Check nb_event_port_enqueue_depth is in limit */
455 if (!dev_conf->nb_event_port_enqueue_depth) {
456 RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
460 if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
461 (dev_conf->nb_event_port_enqueue_depth >
462 info.max_event_port_enqueue_depth)) {
463 RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
464 dev_id, dev_conf->nb_event_port_enqueue_depth,
465 info.max_event_port_enqueue_depth);
469 /* Copy the dev_conf parameter into the dev structure */
470 memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
472 /* Setup new number of queues and reconfigure device. */
473 diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
475 RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
480 /* Setup new number of ports and reconfigure device. */
481 diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
483 rte_event_dev_queue_config(dev, 0);
484 RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
489 /* Configure the device */
490 diag = (*dev->dev_ops->dev_configure)(dev);
492 RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
493 rte_event_dev_queue_config(dev, 0);
494 rte_event_dev_port_config(dev, 0);
497 dev->data->event_dev_cap = info.event_dev_cap;
502 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
504 if (queue_id < dev->data->nb_queues && queue_id <
505 RTE_EVENT_MAX_QUEUES_PER_DEV)
512 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
513 struct rte_event_queue_conf *queue_conf)
515 struct rte_eventdev *dev;
517 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
518 dev = &rte_eventdevs[dev_id];
520 if (queue_conf == NULL)
523 if (!is_valid_queue(dev, queue_id)) {
524 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
528 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
529 memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
530 (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
535 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
538 !(queue_conf->event_queue_cfg &
539 RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
540 ((queue_conf->event_queue_cfg &
541 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
542 (queue_conf->schedule_type
543 == RTE_SCHED_TYPE_ATOMIC)
551 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
554 !(queue_conf->event_queue_cfg &
555 RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
556 ((queue_conf->event_queue_cfg &
557 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
558 (queue_conf->schedule_type
559 == RTE_SCHED_TYPE_ORDERED)
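/* Set up an event queue. A NULL queue_conf selects the driver's default
 * configuration.
 */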
568 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
569 const struct rte_event_queue_conf *queue_conf)
571 struct rte_eventdev *dev;
572 struct rte_event_queue_conf def_conf;
574 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
575 dev = &rte_eventdevs[dev_id];
577 if (!is_valid_queue(dev, queue_id)) {
578 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
582 /* Check nb_atomic_flows limit */
583 if (is_valid_atomic_queue_conf(queue_conf)) {
584 if (queue_conf->nb_atomic_flows == 0 ||
585 queue_conf->nb_atomic_flows >
586 dev->data->dev_conf.nb_event_queue_flows) {
588 "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
589 dev_id, queue_id, queue_conf->nb_atomic_flows,
590 dev->data->dev_conf.nb_event_queue_flows);
595 /* Check nb_atomic_order_sequences limit */
596 if (is_valid_ordered_queue_conf(queue_conf)) {
597 if (queue_conf->nb_atomic_order_sequences == 0 ||
598 queue_conf->nb_atomic_order_sequences >
599 dev->data->dev_conf.nb_event_queue_flows) {
601 "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
602 dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
603 dev->data->dev_conf.nb_event_queue_flows);
608 if (dev->data->dev_started) {
610 "device %d must be stopped to allow queue setup", dev_id);
614 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
616 if (queue_conf == NULL) {
617 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
619 (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
620 queue_conf = &def_conf;
623 dev->data->queues_cfg[queue_id] = *queue_conf;
624 return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
628 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
630 if (port_id < dev->data->nb_ports)
637 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
638 struct rte_event_port_conf *port_conf)
640 struct rte_eventdev *dev;
642 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
643 dev = &rte_eventdevs[dev_id];
645 if (port_conf == NULL)
648 if (!is_valid_port(dev, port_id)) {
649 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
653 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
654 memset(port_conf, 0, sizeof(struct rte_event_port_conf));
655 (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
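/* Set up an event port. A NULL port_conf selects the driver's default
 * configuration; after setup the port starts with no queues linked.
 */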
660 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
661 const struct rte_event_port_conf *port_conf)
663 struct rte_eventdev *dev;
664 struct rte_event_port_conf def_conf;
667 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
668 dev = &rte_eventdevs[dev_id];
670 if (!is_valid_port(dev, port_id)) {
671 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
675 /* Check new_event_threshold limit */
676 if ((port_conf && !port_conf->new_event_threshold) ||
677 (port_conf && port_conf->new_event_threshold >
678 dev->data->dev_conf.nb_events_limit)) {
680 "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
681 dev_id, port_id, port_conf->new_event_threshold,
682 dev->data->dev_conf.nb_events_limit);
686 /* Check dequeue_depth limit */
687 if ((port_conf && !port_conf->dequeue_depth) ||
688 (port_conf && port_conf->dequeue_depth >
689 dev->data->dev_conf.nb_event_port_dequeue_depth)) {
691 "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
692 dev_id, port_id, port_conf->dequeue_depth,
693 dev->data->dev_conf.nb_event_port_dequeue_depth);
697 /* Check enqueue_depth limit */
698 if ((port_conf && !port_conf->enqueue_depth) ||
699 (port_conf && port_conf->enqueue_depth >
700 dev->data->dev_conf.nb_event_port_enqueue_depth)) {
702 "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
703 dev_id, port_id, port_conf->enqueue_depth,
704 dev->data->dev_conf.nb_event_port_enqueue_depth);
708 if (port_conf && port_conf->disable_implicit_release &&
709 !(dev->data->event_dev_cap &
710 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
712 "dev%d port%d Implicit release disable not supported",
717 if (dev->data->dev_started) {
719 "device %d must be stopped to allow port setup", dev_id);
723 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
725 if (port_conf == NULL) {
726 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
728 (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
729 port_conf = &def_conf;
732 dev->data->ports_cfg[port_id] = *port_conf;
734 diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
736 /* Unlink all the queues from this port (default state after setup) */
738 diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
747 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
748 uint32_t *attr_value)
750 struct rte_eventdev *dev;
754 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
755 dev = &rte_eventdevs[dev_id];
758 case RTE_EVENT_DEV_ATTR_PORT_COUNT:
759 *attr_value = dev->data->nb_ports;
761 case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
762 *attr_value = dev->data->nb_queues;
764 case RTE_EVENT_DEV_ATTR_STARTED:
765 *attr_value = dev->data->dev_started;
775 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
776 uint32_t *attr_value)
778 struct rte_eventdev *dev;
783 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
784 dev = &rte_eventdevs[dev_id];
785 if (!is_valid_port(dev, port_id)) {
786 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
791 case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
792 *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
794 case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
795 *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
797 case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
798 *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
807 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
808 uint32_t *attr_value)
810 struct rte_event_queue_conf *conf;
811 struct rte_eventdev *dev;
816 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
817 dev = &rte_eventdevs[dev_id];
818 if (!is_valid_queue(dev, queue_id)) {
819 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
823 conf = &dev->data->queues_cfg[queue_id];
826 case RTE_EVENT_QUEUE_ATTR_PRIORITY:
827 *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
828 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
829 *attr_value = conf->priority;
831 case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
832 *attr_value = conf->nb_atomic_flows;
834 case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
835 *attr_value = conf->nb_atomic_order_sequences;
837 case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
838 *attr_value = conf->event_queue_cfg;
840 case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
841 if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
844 *attr_value = conf->schedule_type;
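/* Link event queues to a port. NULL queues means every configured queue;
 * NULL priorities means RTE_EVENT_DEV_PRIORITY_NORMAL for each link. On
 * success the per-port links_map is updated with the applied priorities.
 */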
853 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
854 const uint8_t queues[], const uint8_t priorities[],
857 struct rte_eventdev *dev;
858 uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
859 uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
863 RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
864 dev = &rte_eventdevs[dev_id];
866 if (*dev->dev_ops->port_link == NULL) {
867 RTE_PMD_DEBUG_TRACE("Function not supported\n");
868 rte_errno = ENOTSUP;
872 if (!is_valid_port(dev, port_id)) {
873 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
878 if (queues == NULL) {
879 for (i = 0; i < dev->data->nb_queues; i++)
882 queues = queues_list;
883 nb_links = dev->data->nb_queues;
886 if (priorities == NULL) {
887 for (i = 0; i < nb_links; i++)
888 priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
890 priorities = priorities_list;
893 for (i = 0; i < nb_links; i++)
894 if (queues[i] >= dev->data->nb_queues) {
899 diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
900 queues, priorities, nb_links);
904 links_map = dev->data->links_map;
905 /* Point links_map to this port specific area */
906 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
907 for (i = 0; i < diag; i++)
908 links_map[queues[i]] = (uint8_t)priorities[i];
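/* Unlink event queues from a port. NULL queues means every queue currently
 * linked to the port; successfully unlinked entries are marked invalid in
 * links_map.
 */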
914 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
915 uint8_t queues[], uint16_t nb_unlinks)
917 struct rte_eventdev *dev;
918 uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
922 RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
923 dev = &rte_eventdevs[dev_id];
925 if (*dev->dev_ops->port_unlink == NULL) {
926 RTE_PMD_DEBUG_TRACE("Function not supported\n");
927 rte_errno = ENOTSUP;
931 if (!is_valid_port(dev, port_id)) {
932 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
937 links_map = dev->data->links_map;
938 /* Point links_map to this port specific area */
939 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
941 if (queues == NULL) {
943 for (i = 0; i < dev->data->nb_queues; i++) {
945 EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
952 for (j = 0; j < nb_unlinks; j++) {
953 if (links_map[queues[j]] ==
954 EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
960 for (i = 0; i < nb_unlinks; i++)
961 if (queues[i] >= dev->data->nb_queues) {
966 diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
972 for (i = 0; i < diag; i++)
973 links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
979 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
980 uint8_t queues[], uint8_t priorities[])
982 struct rte_eventdev *dev;
986 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
987 dev = &rte_eventdevs[dev_id];
988 if (!is_valid_port(dev, port_id)) {
989 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
993 links_map = dev->data->links_map;
994 /* Point links_map to this port specific area */
995 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
996 for (i = 0; i < dev->data->nb_queues; i++) {
997 if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
999 priorities[count] = (uint8_t)links_map[i];
1007 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1008 uint64_t *timeout_ticks)
1010 struct rte_eventdev *dev;
1012 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1013 dev = &rte_eventdevs[dev_id];
1014 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
1016 if (timeout_ticks == NULL)
1019 return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
1023 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
1025 struct rte_eventdev *dev;
1027 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1028 dev = &rte_eventdevs[dev_id];
1030 if (service_id == NULL)
1033 if (dev->data->service_inited)
1034 *service_id = dev->data->service_id;
1036 return dev->data->service_inited ? 0 : -ESRCH;
1040 rte_event_dev_dump(uint8_t dev_id, FILE *f)
1042 struct rte_eventdev *dev;
1044 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1045 dev = &rte_eventdevs[dev_id];
1046 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
1048 (*dev->dev_ops->dump)(dev, f);
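/* Helper: number of extended statistics the driver exposes for the given
 * mode and queue/port id.
 */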
1054 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1055 uint8_t queue_port_id)
1057 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1058 if (dev->dev_ops->xstats_get_names != NULL)
1059 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1066 rte_event_dev_xstats_names_get(uint8_t dev_id,
1067 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
1068 struct rte_event_dev_xstats_name *xstats_names,
1069 unsigned int *ids, unsigned int size)
1071 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1072 const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1074 if (xstats_names == NULL || cnt_expected_entries < 0 ||
1075 (int)size < cnt_expected_entries)
1076 return cnt_expected_entries;
1078 /* dev_id checked above */
1079 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1081 if (dev->dev_ops->xstats_get_names != NULL)
1082 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1083 queue_port_id, xstats_names, ids, size);
1088 /* retrieve eventdev extended statistics */
1090 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1091 uint8_t queue_port_id, const unsigned int ids[],
1092 uint64_t values[], unsigned int n)
1094 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1095 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1097 /* implemented by the driver */
1098 if (dev->dev_ops->xstats_get != NULL)
1099 return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1105 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1108 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1109 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1110 unsigned int temp = -1;
1113 *id = (unsigned int)-1;
1115 id = &temp; /* ensure driver never gets a NULL value */
1117 /* implemented by driver */
1118 if (dev->dev_ops->xstats_get_by_name != NULL)
1119 return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
1123 int rte_event_dev_xstats_reset(uint8_t dev_id,
1124 enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1125 const uint32_t ids[], uint32_t nb_ids)
1127 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1128 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1130 if (dev->dev_ops->xstats_reset != NULL)
1131 return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1136 int rte_event_dev_selftest(uint8_t dev_id)
1138 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1139 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1141 if (dev->dev_ops->dev_selftest != NULL)
1142 return (*dev->dev_ops->dev_selftest)();
1147 rte_event_dev_start(uint8_t dev_id)
1149 struct rte_eventdev *dev;
1152 RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1154 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1155 dev = &rte_eventdevs[dev_id];
1156 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1158 if (dev->data->dev_started != 0) {
1159 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already started",
1164 diag = (*dev->dev_ops->dev_start)(dev);
1166 dev->data->dev_started = 1;
1174 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1175 eventdev_stop_flush_t callback, void *userdata)
1177 struct rte_eventdev *dev;
1179 RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
1181 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1182 dev = &rte_eventdevs[dev_id];
1184 dev->dev_ops->dev_stop_flush = callback;
1185 dev->data->dev_stop_flush_arg = userdata;
1191 rte_event_dev_stop(uint8_t dev_id)
1193 struct rte_eventdev *dev;
1195 RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1197 RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1198 dev = &rte_eventdevs[dev_id];
1199 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1201 if (dev->data->dev_started == 0) {
1202 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already stopped",
1207 dev->data->dev_started = 0;
1208 (*dev->dev_ops->dev_stop)(dev);
1212 rte_event_dev_close(uint8_t dev_id)
1214 struct rte_eventdev *dev;
1216 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1217 dev = &rte_eventdevs[dev_id];
1218 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1220 /* Device must be stopped before it can be closed */
1221 if (dev->data->dev_started == 1) {
1222 RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1227 return (*dev->dev_ops->dev_close)(dev);
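/* Reserve (primary process) or look up (secondary process) the shared
 * memzone backing the device's rte_eventdev_data.
 */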
1231 rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1234 char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1235 const struct rte_memzone *mz;
1238 /* Generate memzone name */
1239 n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1240 if (n >= (int)sizeof(mz_name))
1243 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1244 mz = rte_memzone_reserve(mz_name,
1245 sizeof(struct rte_eventdev_data),
1248 mz = rte_memzone_lookup(mz_name);
1254 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1255 memset(*data, 0, sizeof(struct rte_eventdev_data));
1260 static inline uint8_t
1261 rte_eventdev_find_free_device_index(void)
1265 for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1266 if (rte_eventdevs[dev_id].attached ==
1267 RTE_EVENTDEV_DETACHED)
1270 return RTE_EVENT_MAX_DEVS;
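/* Allocate a free device slot for a new PMD instance, set up its shared data
 * area and mark it attached.
 */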
1273 struct rte_eventdev *
1274 rte_event_pmd_allocate(const char *name, int socket_id)
1276 struct rte_eventdev *eventdev;
1279 if (rte_event_pmd_get_named_dev(name) != NULL) {
1280 RTE_EDEV_LOG_ERR("Event device with name %s already "
1281 "allocated!", name);
1285 dev_id = rte_eventdev_find_free_device_index();
1286 if (dev_id == RTE_EVENT_MAX_DEVS) {
1287 RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1291 eventdev = &rte_eventdevs[dev_id];
1293 if (eventdev->data == NULL) {
1294 struct rte_eventdev_data *eventdev_data = NULL;
1296 int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
1299 if (retval < 0 || eventdev_data == NULL)
1302 eventdev->data = eventdev_data;
1304 snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
1307 eventdev->data->dev_id = dev_id;
1308 eventdev->data->socket_id = socket_id;
1309 eventdev->data->dev_started = 0;
1311 eventdev->attached = RTE_EVENTDEV_ATTACHED;
1313 eventdev_globals.nb_devs++;
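/* Mark the device detached and, in the primary process, free its private
 * data and the shared data memzone.
 */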
1320 rte_event_pmd_release(struct rte_eventdev *eventdev)
1323 char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1324 const struct rte_memzone *mz;
1326 if (eventdev == NULL)
1329 eventdev->attached = RTE_EVENTDEV_DETACHED;
1330 eventdev_globals.nb_devs--;
1332 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1333 rte_free(eventdev->data->dev_private);
1335 /* Generate memzone name */
1336 ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1337 eventdev->data->dev_id);
1338 if (ret >= (int)sizeof(mz_name))
1341 mz = rte_memzone_lookup(mz_name);
1345 ret = rte_memzone_free(mz);
1350 eventdev->data = NULL;