1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Cavium, Inc
13 #include <sys/types.h>
14 #include <sys/queue.h>
16 #include <rte_byteorder.h>
18 #include <rte_debug.h>
20 #include <rte_memory.h>
21 #include <rte_memcpy.h>
22 #include <rte_memzone.h>
24 #include <rte_per_lcore.h>
25 #include <rte_lcore.h>
26 #include <rte_atomic.h>
27 #include <rte_branch_prediction.h>
28 #include <rte_common.h>
29 #include <rte_malloc.h>
30 #include <rte_errno.h>
31 #include <rte_ethdev.h>
32 #include <rte_cryptodev.h>
33 #include <rte_cryptodev_pmd.h>
35 #include "rte_eventdev.h"
36 #include "rte_eventdev_pmd.h"
38 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
40 struct rte_eventdev *rte_eventdevs = rte_event_devices;
42 static struct rte_eventdev_global eventdev_globals = {
46 /* Event dev north bound API implementation */
49 rte_event_dev_count(void)
51 return eventdev_globals.nb_devs;
55 rte_event_dev_get_dev_id(const char *name)
63 for (i = 0; i < eventdev_globals.nb_devs; i++) {
64 cmp = (strncmp(rte_event_devices[i].data->name, name,
65 RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
66 (rte_event_devices[i].dev ? (strncmp(
67 rte_event_devices[i].dev->driver->name, name,
68 RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
69 if (cmp && (rte_event_devices[i].attached ==
70 RTE_EVENTDEV_ATTACHED))
77 rte_event_dev_socket_id(uint8_t dev_id)
79 struct rte_eventdev *dev;
81 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
82 dev = &rte_eventdevs[dev_id];
84 return dev->data->socket_id;
88 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
90 struct rte_eventdev *dev;
92 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
93 dev = &rte_eventdevs[dev_id];
98 memset(dev_info, 0, sizeof(struct rte_event_dev_info));
100 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
101 (*dev->dev_ops->dev_infos_get)(dev, dev_info);
103 dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
105 dev_info->dev = dev->dev;
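/*
 * Illustrative sketch (not part of the library): how an application might
 * use rte_event_dev_info_get() to size its bursts. The burst length of 32
 * is an assumption, not a recommendation.
 */
static int __rte_unused
example_query_info(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	int ret;

	ret = rte_event_dev_info_get(dev_id, &info);
	if (ret < 0)
		return ret;

	/* Cap application burst sizes to what the ports can accept */
	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE)
		return RTE_MIN(32, info.max_event_port_enqueue_depth);

	return 1; /* non-burst devices accept one event per call */
}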
110 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
113 struct rte_eventdev *dev;
115 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
116 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
118 dev = &rte_eventdevs[dev_id];
124 return dev->dev_ops->eth_rx_adapter_caps_get ?
125 (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
126 &rte_eth_devices[eth_port_id],
131 int __rte_experimental
132 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
134 struct rte_eventdev *dev;
135 const struct rte_event_timer_adapter_ops *ops;
137 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
139 dev = &rte_eventdevs[dev_id];
145 return dev->dev_ops->timer_adapter_caps_get ?
146 (*dev->dev_ops->timer_adapter_caps_get)(dev,
153 int __rte_experimental
154 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
157 struct rte_eventdev *dev;
158 struct rte_cryptodev *cdev;
160 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
161 if (!rte_cryptodev_pmd_is_valid_dev(cdev_id))
164 dev = &rte_eventdevs[dev_id];
165 cdev = rte_cryptodev_pmd_get_dev(cdev_id);
171 return dev->dev_ops->crypto_adapter_caps_get ?
172 (*dev->dev_ops->crypto_adapter_caps_get)
173 (dev, cdev, caps) : -ENOTSUP;
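/*
 * Illustrative sketch (not part of the library): probing adapter
 * capabilities before choosing an enqueue model. The crypto device id and
 * the decision taken on the capability flag are application assumptions.
 */
static int __rte_unused
example_check_crypto_adapter_caps(uint8_t dev_id, uint8_t cdev_id)
{
	uint32_t caps = 0;
	int ret;

	ret = rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &caps);
	if (ret < 0)
		return ret;

	/* Devices with an internal port can forward crypto completions to
	 * the eventdev without a service core.
	 */
	return !!(caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD);
}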
176 int __rte_experimental
177 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
180 struct rte_eventdev *dev;
181 struct rte_eth_dev *eth_dev;
183 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
184 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
186 dev = &rte_eventdevs[dev_id];
187 eth_dev = &rte_eth_devices[eth_port_id];
194 return dev->dev_ops->eth_tx_adapter_caps_get ?
195 (*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
202 rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
204 uint8_t old_nb_queues = dev->data->nb_queues;
205 struct rte_event_queue_conf *queues_cfg;
208 RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
211 /* First time configuration */
212 if (dev->data->queues_cfg == NULL && nb_queues != 0) {
213 /* Allocate memory to store queue configuration */
214 dev->data->queues_cfg = rte_zmalloc_socket(
215 "eventdev->data->queues_cfg",
216 sizeof(dev->data->queues_cfg[0]) * nb_queues,
217 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
218 if (dev->data->queues_cfg == NULL) {
219 dev->data->nb_queues = 0;
220 RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
221 "nb_queues %u", nb_queues);
225 } else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
226 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
228 for (i = nb_queues; i < old_nb_queues; i++)
229 (*dev->dev_ops->queue_release)(dev, i);
231 /* Re-allocate memory to store queue configuration */
232 queues_cfg = dev->data->queues_cfg;
233 queues_cfg = rte_realloc(queues_cfg,
234 sizeof(queues_cfg[0]) * nb_queues,
235 RTE_CACHE_LINE_SIZE);
236 if (queues_cfg == NULL) {
237 RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
238 " nb_queues %u", nb_queues);
241 dev->data->queues_cfg = queues_cfg;
243 if (nb_queues > old_nb_queues) {
244 uint8_t new_qs = nb_queues - old_nb_queues;
246 memset(queues_cfg + old_nb_queues, 0,
247 sizeof(queues_cfg[0]) * new_qs);
249 } else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
250 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
252 for (i = nb_queues; i < old_nb_queues; i++)
253 (*dev->dev_ops->queue_release)(dev, i);
256 dev->data->nb_queues = nb_queues;
260 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
263 rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
265 uint8_t old_nb_ports = dev->data->nb_ports;
268 struct rte_event_port_conf *ports_cfg;
271 RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
274 /* First time configuration */
275 if (dev->data->ports == NULL && nb_ports != 0) {
276 dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
277 sizeof(dev->data->ports[0]) * nb_ports,
278 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
279 if (dev->data->ports == NULL) {
280 dev->data->nb_ports = 0;
281 RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
282 "nb_ports %u", nb_ports);
286 /* Allocate memory to store port configurations */
287 dev->data->ports_cfg =
288 rte_zmalloc_socket("eventdev->ports_cfg",
289 sizeof(dev->data->ports_cfg[0]) * nb_ports,
290 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
291 if (dev->data->ports_cfg == NULL) {
292 dev->data->nb_ports = 0;
293 RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
294 "nb_ports %u", nb_ports);
298 /* Allocate memory to store queue to port link connection */
299 dev->data->links_map =
300 rte_zmalloc_socket("eventdev->links_map",
301 sizeof(dev->data->links_map[0]) * nb_ports *
302 RTE_EVENT_MAX_QUEUES_PER_DEV,
303 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
304 if (dev->data->links_map == NULL) {
305 dev->data->nb_ports = 0;
306 RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
307 "nb_ports %u", nb_ports);
310 for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
311 dev->data->links_map[i] =
312 EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
313 } else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
314 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
316 ports = dev->data->ports;
317 ports_cfg = dev->data->ports_cfg;
318 links_map = dev->data->links_map;
320 for (i = nb_ports; i < old_nb_ports; i++)
321 (*dev->dev_ops->port_release)(ports[i]);
323 /* Realloc memory for ports */
324 ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
325 RTE_CACHE_LINE_SIZE);
327 RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
328 " nb_ports %u", nb_ports);
332 /* Realloc memory for ports_cfg */
333 ports_cfg = rte_realloc(ports_cfg,
334 sizeof(ports_cfg[0]) * nb_ports,
335 RTE_CACHE_LINE_SIZE);
336 if (ports_cfg == NULL) {
337 RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
338 " nb_ports %u", nb_ports);
342 /* Realloc memory to store queue to port link connection */
343 links_map = rte_realloc(links_map,
344 sizeof(dev->data->links_map[0]) * nb_ports *
345 RTE_EVENT_MAX_QUEUES_PER_DEV,
346 RTE_CACHE_LINE_SIZE);
347 if (links_map == NULL) {
348 dev->data->nb_ports = 0;
349 RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
350 "nb_ports %u", nb_ports);
354 if (nb_ports > old_nb_ports) {
355 uint8_t new_ps = nb_ports - old_nb_ports;
356 unsigned int old_links_map_end =
357 old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
358 unsigned int links_map_end =
359 nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
361 memset(ports + old_nb_ports, 0,
362 sizeof(ports[0]) * new_ps);
363 memset(ports_cfg + old_nb_ports, 0,
364 sizeof(ports_cfg[0]) * new_ps);
365 for (i = old_links_map_end; i < links_map_end; i++)
367 EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
370 dev->data->ports = ports;
371 dev->data->ports_cfg = ports_cfg;
372 dev->data->links_map = links_map;
373 } else if (dev->data->ports != NULL && nb_ports == 0) {
374 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
376 ports = dev->data->ports;
377 for (i = nb_ports; i < old_nb_ports; i++)
378 (*dev->dev_ops->port_release)(ports[i]);
381 dev->data->nb_ports = nb_ports;
386 rte_event_dev_configure(uint8_t dev_id,
387 const struct rte_event_dev_config *dev_conf)
389 struct rte_eventdev *dev;
390 struct rte_event_dev_info info;
393 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
394 dev = &rte_eventdevs[dev_id];
396 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
397 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
399 if (dev->data->dev_started) {
401 "device %d must be stopped to allow configuration", dev_id);
405 if (dev_conf == NULL)
408 (*dev->dev_ops->dev_infos_get)(dev, &info);
410 /* Check dequeue_timeout_ns value is in limit */
411 if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
412 if (dev_conf->dequeue_timeout_ns &&
413 (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
414 || dev_conf->dequeue_timeout_ns >
415 info.max_dequeue_timeout_ns)) {
416 RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
417 " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
418 dev_id, dev_conf->dequeue_timeout_ns,
419 info.min_dequeue_timeout_ns,
420 info.max_dequeue_timeout_ns);
425 /* Check nb_events_limit is in limit */
426 if (dev_conf->nb_events_limit > info.max_num_events) {
427 RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
428 dev_id, dev_conf->nb_events_limit, info.max_num_events);
432 /* Check nb_event_queues is in limit */
433 if (!dev_conf->nb_event_queues) {
434 RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
438 if (dev_conf->nb_event_queues > info.max_event_queues) {
439 RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
440 dev_id, dev_conf->nb_event_queues, info.max_event_queues);
444 /* Check nb_event_ports is in limit */
445 if (!dev_conf->nb_event_ports) {
446 RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
449 if (dev_conf->nb_event_ports > info.max_event_ports) {
450 RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports= %d",
451 dev_id, dev_conf->nb_event_ports, info.max_event_ports);
455 /* Check nb_event_queue_flows is in limit */
456 if (!dev_conf->nb_event_queue_flows) {
457 RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
460 if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
461 RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
462 dev_id, dev_conf->nb_event_queue_flows,
463 info.max_event_queue_flows);
467 /* Check nb_event_port_dequeue_depth is in limit */
468 if (!dev_conf->nb_event_port_dequeue_depth) {
469 RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
473 if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
474 (dev_conf->nb_event_port_dequeue_depth >
475 info.max_event_port_dequeue_depth)) {
476 RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
477 dev_id, dev_conf->nb_event_port_dequeue_depth,
478 info.max_event_port_dequeue_depth);
482 /* Check nb_event_port_enqueue_depth is in limit */
483 if (!dev_conf->nb_event_port_enqueue_depth) {
484 RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
488 if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
489 (dev_conf->nb_event_port_enqueue_depth >
490 info.max_event_port_enqueue_depth)) {
491 RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
492 dev_id, dev_conf->nb_event_port_enqueue_depth,
493 info.max_event_port_enqueue_depth);
497 /* Copy the dev_conf parameter into the dev structure */
498 memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
500 /* Setup new number of queues and reconfigure device. */
501 diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
503 RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
508 /* Setup new number of ports and reconfigure device. */
509 diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
511 rte_event_dev_queue_config(dev, 0);
512 RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
517 /* Configure the device */
518 diag = (*dev->dev_ops->dev_configure)(dev);
520 RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
521 rte_event_dev_queue_config(dev, 0);
522 rte_event_dev_port_config(dev, 0);
525 dev->data->event_dev_cap = info.event_dev_cap;
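/*
 * Illustrative sketch (not part of the library): a minimal configuration
 * flow that satisfies the limit checks above by deriving every value from
 * rte_event_dev_info_get(). The single-queue/single-port layout is an
 * application assumption.
 */
static int __rte_unused
example_configure(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config cfg;
	int ret;

	ret = rte_event_dev_info_get(dev_id, &info);
	if (ret < 0)
		return ret;

	memset(&cfg, 0, sizeof(cfg));
	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
	cfg.nb_events_limit = info.max_num_events;
	cfg.nb_event_queues = 1;
	cfg.nb_event_ports = 1;
	cfg.nb_event_queue_flows = info.max_event_queue_flows;
	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;

	return rte_event_dev_configure(dev_id, &cfg);
}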
530 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
532 if (queue_id < dev->data->nb_queues && queue_id <
533 RTE_EVENT_MAX_QUEUES_PER_DEV)
540 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
541 struct rte_event_queue_conf *queue_conf)
543 struct rte_eventdev *dev;
545 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
546 dev = &rte_eventdevs[dev_id];
548 if (queue_conf == NULL)
551 if (!is_valid_queue(dev, queue_id)) {
552 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
556 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
557 memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
558 (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
563 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
566 !(queue_conf->event_queue_cfg &
567 RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
568 ((queue_conf->event_queue_cfg &
569 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
570 (queue_conf->schedule_type
571 == RTE_SCHED_TYPE_ATOMIC)
579 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
582 !(queue_conf->event_queue_cfg &
583 RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
584 ((queue_conf->event_queue_cfg &
585 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
586 (queue_conf->schedule_type
587 == RTE_SCHED_TYPE_ORDERED)
596 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
597 const struct rte_event_queue_conf *queue_conf)
599 struct rte_eventdev *dev;
600 struct rte_event_queue_conf def_conf;
602 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
603 dev = &rte_eventdevs[dev_id];
605 if (!is_valid_queue(dev, queue_id)) {
606 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
610 /* Check nb_atomic_flows limit */
611 if (is_valid_atomic_queue_conf(queue_conf)) {
612 if (queue_conf->nb_atomic_flows == 0 ||
613 queue_conf->nb_atomic_flows >
614 dev->data->dev_conf.nb_event_queue_flows) {
616 "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
617 dev_id, queue_id, queue_conf->nb_atomic_flows,
618 dev->data->dev_conf.nb_event_queue_flows);
623 /* Check nb_atomic_order_sequences limit */
624 if (is_valid_ordered_queue_conf(queue_conf)) {
625 if (queue_conf->nb_atomic_order_sequences == 0 ||
626 queue_conf->nb_atomic_order_sequences >
627 dev->data->dev_conf.nb_event_queue_flows) {
629 "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
630 dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
631 dev->data->dev_conf.nb_event_queue_flows);
636 if (dev->data->dev_started) {
638 "device %d must be stopped to allow queue setup", dev_id);
642 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
644 if (queue_conf == NULL) {
645 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
647 (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
648 queue_conf = &def_conf;
651 dev->data->queues_cfg[queue_id] = *queue_conf;
652 return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
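/*
 * Illustrative sketch (not part of the library): setting up one atomic
 * queue starting from the driver defaults. The flow count is hypothetical;
 * it must stay within nb_event_queue_flows from the device configuration.
 */
static int __rte_unused
example_setup_atomic_queue(uint8_t dev_id, uint8_t queue_id)
{
	struct rte_event_queue_conf qconf;
	int ret;

	ret = rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
	if (ret < 0)
		return ret;

	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
	qconf.nb_atomic_flows = 1024;	/* hypothetical value */
	qconf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	return rte_event_queue_setup(dev_id, queue_id, &qconf);
}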
656 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
658 if (port_id < dev->data->nb_ports)
665 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
666 struct rte_event_port_conf *port_conf)
668 struct rte_eventdev *dev;
670 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
671 dev = &rte_eventdevs[dev_id];
673 if (port_conf == NULL)
676 if (!is_valid_port(dev, port_id)) {
677 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
681 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
682 memset(port_conf, 0, sizeof(struct rte_event_port_conf));
683 (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
688 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
689 const struct rte_event_port_conf *port_conf)
691 struct rte_eventdev *dev;
692 struct rte_event_port_conf def_conf;
695 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
696 dev = &rte_eventdevs[dev_id];
698 if (!is_valid_port(dev, port_id)) {
699 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
703 /* Check new_event_threshold limit */
704 if ((port_conf && !port_conf->new_event_threshold) ||
705 (port_conf && port_conf->new_event_threshold >
706 dev->data->dev_conf.nb_events_limit)) {
708 "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
709 dev_id, port_id, port_conf->new_event_threshold,
710 dev->data->dev_conf.nb_events_limit);
714 /* Check dequeue_depth limit */
715 if ((port_conf && !port_conf->dequeue_depth) ||
716 (port_conf && port_conf->dequeue_depth >
717 dev->data->dev_conf.nb_event_port_dequeue_depth)) {
719 "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
720 dev_id, port_id, port_conf->dequeue_depth,
721 dev->data->dev_conf.nb_event_port_dequeue_depth);
725 /* Check enqueue_depth limit */
726 if ((port_conf && !port_conf->enqueue_depth) ||
727 (port_conf && port_conf->enqueue_depth >
728 dev->data->dev_conf.nb_event_port_enqueue_depth)) {
730 "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
731 dev_id, port_id, port_conf->enqueue_depth,
732 dev->data->dev_conf.nb_event_port_enqueue_depth);
736 if (port_conf && port_conf->disable_implicit_release &&
737 !(dev->data->event_dev_cap &
738 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
740 "dev%d port%d Implicit release disable not supported",
745 if (dev->data->dev_started) {
747 "device %d must be stopped to allow port setup", dev_id);
751 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
753 if (port_conf == NULL) {
754 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
756 (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
757 port_conf = &def_conf;
760 dev->data->ports_cfg[port_id] = *port_conf;
762 diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
764 /* Unlink all the queues from this port (default state after setup) */
766 diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
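/*
 * Illustrative sketch (not part of the library): the simplest port setup,
 * relying on the NULL port_conf path above to pull in the driver defaults.
 */
static int __rte_unused
example_setup_port(uint8_t dev_id, uint8_t port_id)
{
	/* NULL lets the PMD's default port configuration be used */
	return rte_event_port_setup(dev_id, port_id, NULL);
}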
775 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
776 uint32_t *attr_value)
778 struct rte_eventdev *dev;
782 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
783 dev = &rte_eventdevs[dev_id];
786 case RTE_EVENT_DEV_ATTR_PORT_COUNT:
787 *attr_value = dev->data->nb_ports;
789 case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
790 *attr_value = dev->data->nb_queues;
792 case RTE_EVENT_DEV_ATTR_STARTED:
793 *attr_value = dev->data->dev_started;
803 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
804 uint32_t *attr_value)
806 struct rte_eventdev *dev;
811 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
812 dev = &rte_eventdevs[dev_id];
813 if (!is_valid_port(dev, port_id)) {
814 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
819 case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
820 *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
822 case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
823 *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
825 case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
826 *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
835 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
836 uint32_t *attr_value)
838 struct rte_event_queue_conf *conf;
839 struct rte_eventdev *dev;
844 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
845 dev = &rte_eventdevs[dev_id];
846 if (!is_valid_queue(dev, queue_id)) {
847 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
851 conf = &dev->data->queues_cfg[queue_id];
854 case RTE_EVENT_QUEUE_ATTR_PRIORITY:
855 *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
856 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
857 *attr_value = conf->priority;
859 case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
860 *attr_value = conf->nb_atomic_flows;
862 case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
863 *attr_value = conf->nb_atomic_order_sequences;
865 case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
866 *attr_value = conf->event_queue_cfg;
868 case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
869 if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
872 *attr_value = conf->schedule_type;
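/*
 * Illustrative sketch (not part of the library): reading back run-time
 * attributes with the three getters above. Port 0 and queue 0 are assumed
 * to exist.
 */
static void __rte_unused
example_read_attrs(uint8_t dev_id)
{
	uint32_t nb_ports = 0, deq_depth = 0, prio = 0;

	rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_PORT_COUNT,
			&nb_ports);
	rte_event_port_attr_get(dev_id, 0, RTE_EVENT_PORT_ATTR_DEQ_DEPTH,
			&deq_depth);
	rte_event_queue_attr_get(dev_id, 0, RTE_EVENT_QUEUE_ATTR_PRIORITY,
			&prio);

	RTE_EDEV_LOG_DEBUG("ports=%u deq_depth=%u queue0 prio=%u",
			nb_ports, deq_depth, prio);
}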
881 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
882 const uint8_t queues[], const uint8_t priorities[],
885 struct rte_eventdev *dev;
886 uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
887 uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
891 RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
892 dev = &rte_eventdevs[dev_id];
894 if (*dev->dev_ops->port_link == NULL) {
895 RTE_PMD_DEBUG_TRACE("Function not supported\n");
896 rte_errno = ENOTSUP;
900 if (!is_valid_port(dev, port_id)) {
901 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
906 if (queues == NULL) {
907 for (i = 0; i < dev->data->nb_queues; i++)
910 queues = queues_list;
911 nb_links = dev->data->nb_queues;
914 if (priorities == NULL) {
915 for (i = 0; i < nb_links; i++)
916 priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
918 priorities = priorities_list;
921 for (i = 0; i < nb_links; i++)
922 if (queues[i] >= dev->data->nb_queues) {
927 diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
928 queues, priorities, nb_links);
932 links_map = dev->data->links_map;
933 /* Point links_map to this port specific area */
934 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
935 for (i = 0; i < diag; i++)
936 links_map[queues[i]] = (uint8_t)priorities[i];
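/*
 * Illustrative sketch (not part of the library): linking every configured
 * queue to a port at normal priority by passing NULL arrays, as handled
 * above.
 */
static int __rte_unused
example_link_all_queues(uint8_t dev_id, uint8_t port_id)
{
	/* NULL queues/priorities link all queues at NORMAL priority;
	 * the return value is the number of links actually established.
	 */
	return rte_event_port_link(dev_id, port_id, NULL, NULL, 0);
}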
942 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
943 uint8_t queues[], uint16_t nb_unlinks)
945 struct rte_eventdev *dev;
946 uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
950 RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
951 dev = &rte_eventdevs[dev_id];
953 if (*dev->dev_ops->port_unlink == NULL) {
954 RTE_PMD_DEBUG_TRACE("Function not supported\n");
955 rte_errno = ENOTSUP;
959 if (!is_valid_port(dev, port_id)) {
960 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
965 links_map = dev->data->links_map;
966 /* Point links_map to this port specific area */
967 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
969 if (queues == NULL) {
971 for (i = 0; i < dev->data->nb_queues; i++) {
973 EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
980 for (j = 0; j < nb_unlinks; j++) {
981 if (links_map[queues[j]] ==
982 EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
988 for (i = 0; i < nb_unlinks; i++)
989 if (queues[i] >= dev->data->nb_queues) {
994 diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
1000 for (i = 0; i < diag; i++)
1001 links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1006 int __rte_experimental
1007 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
1009 struct rte_eventdev *dev;
1011 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1012 dev = &rte_eventdevs[dev_id];
1013 if (!is_valid_port(dev, port_id)) {
1014 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1018 /* Return 0 if the PMD does not implement unlinks in progress.
1019 * This allows PMDs which handle unlink synchronously to not implement
1020 * this function at all.
1022 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);
1024 return (*dev->dev_ops->port_unlinks_in_progress)(dev,
1025 dev->data->ports[port_id]);
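/*
 * Illustrative sketch (not part of the library): unlinking all queues and
 * waiting for the PMD to drain in-flight unlinks before tearing a port
 * down. The busy-wait is an application assumption; a real caller may
 * prefer to yield or bound the wait.
 */
static void __rte_unused
example_quiesce_port(uint8_t dev_id, uint8_t port_id)
{
	(void)rte_event_port_unlink(dev_id, port_id, NULL, 0);

	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
		; /* spin until the PMD reports no pending unlinks */
}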
1029 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1030 uint8_t queues[], uint8_t priorities[])
1032 struct rte_eventdev *dev;
1033 uint16_t *links_map;
1036 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1037 dev = &rte_eventdevs[dev_id];
1038 if (!is_valid_port(dev, port_id)) {
1039 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1043 links_map = dev->data->links_map;
1044 /* Point links_map to this port specific area */
1045 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1046 for (i = 0; i < dev->data->nb_queues; i++) {
1047 if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1049 priorities[count] = (uint8_t)links_map[i];
1057 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1058 uint64_t *timeout_ticks)
1060 struct rte_eventdev *dev;
1062 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1063 dev = &rte_eventdevs[dev_id];
1064 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
1066 if (timeout_ticks == NULL)
1069 return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
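/*
 * Illustrative sketch (not part of the library): converting a wall-clock
 * timeout to device ticks for rte_event_dequeue_burst(). The 100 us value
 * is a hypothetical choice.
 */
static uint16_t __rte_unused
example_dequeue_with_timeout(uint8_t dev_id, uint8_t port_id,
		struct rte_event *ev)
{
	uint64_t ticks = 0;

	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) < 0)
		ticks = 0; /* fall back to the device's default behaviour */

	return rte_event_dequeue_burst(dev_id, port_id, ev, 1, ticks);
}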
1073 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
1075 struct rte_eventdev *dev;
1077 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1078 dev = &rte_eventdevs[dev_id];
1080 if (service_id == NULL)
1083 if (dev->data->service_inited)
1084 *service_id = dev->data->service_id;
1086 return dev->data->service_inited ? 0 : -ESRCH;
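/*
 * Illustrative sketch (not part of the library): software PMDs expose their
 * scheduler as a service; the id returned here is what the application maps
 * to a service lcore (e.g. with rte_service_map_lcore_set() from
 * <rte_service.h>, not called here to keep the sketch self-contained).
 */
static int __rte_unused
example_get_sched_service(uint8_t dev_id, uint32_t *service_id)
{
	int ret;

	ret = rte_event_dev_service_id_get(dev_id, service_id);
	if (ret == -ESRCH)
		return 0; /* scheduling is internal, no service core needed */

	return ret;
}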
1090 rte_event_dev_dump(uint8_t dev_id, FILE *f)
1092 struct rte_eventdev *dev;
1094 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1095 dev = &rte_eventdevs[dev_id];
1096 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
1098 (*dev->dev_ops->dump)(dev, f);
1104 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1105 uint8_t queue_port_id)
1107 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1108 if (dev->dev_ops->xstats_get_names != NULL)
1109 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1116 rte_event_dev_xstats_names_get(uint8_t dev_id,
1117 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
1118 struct rte_event_dev_xstats_name *xstats_names,
1119 unsigned int *ids, unsigned int size)
1121 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1122 const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1124 if (xstats_names == NULL || cnt_expected_entries < 0 ||
1125 (int)size < cnt_expected_entries)
1126 return cnt_expected_entries;
1128 /* dev_id checked above */
1129 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1131 if (dev->dev_ops->xstats_get_names != NULL)
1132 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1133 queue_port_id, xstats_names, ids, size);
1138 /* retrieve eventdev extended statistics */
1140 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1141 uint8_t queue_port_id, const unsigned int ids[],
1142 uint64_t values[], unsigned int n)
1144 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1145 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1147 /* implemented by the driver */
1148 if (dev->dev_ops->xstats_get != NULL)
1149 return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1155 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1158 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1159 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1160 unsigned int temp = -1;
1163 *id = (unsigned int)-1;
1165 id = &temp; /* ensure driver never gets a NULL value */
1167 /* implemented by driver */
1168 if (dev->dev_ops->xstats_get_by_name != NULL)
1169 return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
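/*
 * Illustrative sketch (not part of the library): dumping device-level
 * xstats by pairing the names and values queries above. Array sizing and
 * the debug log are application choices.
 */
static void __rte_unused
example_dump_xstats(uint8_t dev_id)
{
	struct rte_event_dev_xstats_name *names;
	unsigned int *ids;
	uint64_t *values;
	int i, cnt;

	/* First call with NULL arrays returns the number of stats */
	cnt = rte_event_dev_xstats_names_get(dev_id,
			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
	if (cnt <= 0)
		return;

	names = rte_malloc(NULL, sizeof(*names) * cnt, 0);
	ids = rte_malloc(NULL, sizeof(*ids) * cnt, 0);
	values = rte_malloc(NULL, sizeof(*values) * cnt, 0);
	if (names == NULL || ids == NULL || values == NULL)
		goto out;

	if (rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
			0, names, ids, cnt) != cnt)
		goto out;
	if (rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
			0, ids, values, cnt) < 0)
		goto out;

	for (i = 0; i < cnt; i++)
		RTE_EDEV_LOG_DEBUG("%s: %" PRIu64, names[i].name, values[i]);

out:
	rte_free(names);
	rte_free(ids);
	rte_free(values);
}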
1173 int rte_event_dev_xstats_reset(uint8_t dev_id,
1174 enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1175 const uint32_t ids[], uint32_t nb_ids)
1177 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1178 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1180 if (dev->dev_ops->xstats_reset != NULL)
1181 return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1186 int rte_event_dev_selftest(uint8_t dev_id)
1188 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1189 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1191 if (dev->dev_ops->dev_selftest != NULL)
1192 return (*dev->dev_ops->dev_selftest)();
1197 rte_event_dev_start(uint8_t dev_id)
1199 struct rte_eventdev *dev;
1202 RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1204 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1205 dev = &rte_eventdevs[dev_id];
1206 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1208 if (dev->data->dev_started != 0) {
1209 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already started",
1214 diag = (*dev->dev_ops->dev_start)(dev);
1216 dev->data->dev_started = 1;
1224 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1225 eventdev_stop_flush_t callback, void *userdata)
1227 struct rte_eventdev *dev;
1229 RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
1231 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1232 dev = &rte_eventdevs[dev_id];
1234 dev->dev_ops->dev_stop_flush = callback;
1235 dev->data->dev_stop_flush_arg = userdata;
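/*
 * Illustrative sketch (not part of the library): a stop-flush callback that
 * simply counts the events drained by rte_event_dev_stop(). Releasing the
 * resources carried by ev (e.g. mbufs) is left to the application.
 */
static void __rte_unused
example_stop_flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
{
	uint64_t *flushed = arg;

	RTE_SET_USED(dev_id);
	RTE_SET_USED(ev);
	(*flushed)++;
}

static int __rte_unused
example_register_stop_flush(uint8_t dev_id, uint64_t *counter)
{
	return rte_event_dev_stop_flush_callback_register(dev_id,
			example_stop_flush_cb, counter);
}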
1241 rte_event_dev_stop(uint8_t dev_id)
1243 struct rte_eventdev *dev;
1245 RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1247 RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1248 dev = &rte_eventdevs[dev_id];
1249 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1251 if (dev->data->dev_started == 0) {
1252 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already stopped",
1257 dev->data->dev_started = 0;
1258 (*dev->dev_ops->dev_stop)(dev);
1262 rte_event_dev_close(uint8_t dev_id)
1264 struct rte_eventdev *dev;
1266 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1267 dev = &rte_eventdevs[dev_id];
1268 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1270 /* Device must be stopped before it can be closed */
1271 if (dev->data->dev_started == 1) {
1272 RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1277 return (*dev->dev_ops->dev_close)(dev);
1281 rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1284 char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1285 const struct rte_memzone *mz;
1288 /* Generate memzone name */
1289 n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1290 if (n >= (int)sizeof(mz_name))
1293 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1294 mz = rte_memzone_reserve(mz_name,
1295 sizeof(struct rte_eventdev_data),
1298 mz = rte_memzone_lookup(mz_name);
1304 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1305 memset(*data, 0, sizeof(struct rte_eventdev_data));
1310 static inline uint8_t
1311 rte_eventdev_find_free_device_index(void)
1315 for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1316 if (rte_eventdevs[dev_id].attached ==
1317 RTE_EVENTDEV_DETACHED)
1320 return RTE_EVENT_MAX_DEVS;
1324 rte_event_tx_adapter_enqueue(__rte_unused void *port,
1325 __rte_unused struct rte_event ev[],
1326 __rte_unused uint16_t nb_events)
1328 rte_errno = ENOTSUP;
1332 struct rte_eventdev *
1333 rte_event_pmd_allocate(const char *name, int socket_id)
1335 struct rte_eventdev *eventdev;
1338 if (rte_event_pmd_get_named_dev(name) != NULL) {
1339 RTE_EDEV_LOG_ERR("Event device with name %s already "
1340 "allocated!", name);
1344 dev_id = rte_eventdev_find_free_device_index();
1345 if (dev_id == RTE_EVENT_MAX_DEVS) {
1346 RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1350 eventdev = &rte_eventdevs[dev_id];
1352 eventdev->txa_enqueue = rte_event_tx_adapter_enqueue;
1354 if (eventdev->data == NULL) {
1355 struct rte_eventdev_data *eventdev_data = NULL;
1357 int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
1360 if (retval < 0 || eventdev_data == NULL)
1363 eventdev->data = eventdev_data;
1365 snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
1368 eventdev->data->dev_id = dev_id;
1369 eventdev->data->socket_id = socket_id;
1370 eventdev->data->dev_started = 0;
1372 eventdev->attached = RTE_EVENTDEV_ATTACHED;
1374 eventdev_globals.nb_devs++;
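/*
 * Illustrative sketch (not part of the library): the shape of a PMD probe
 * routine built on rte_event_pmd_allocate(). The ops table and device name
 * are hypothetical; real drivers normally go through the vdev/PCI PMD
 * helpers rather than calling this directly.
 */
static int __rte_unused
example_pmd_probe(const char *name, int socket_id,
		const struct rte_eventdev_ops *ops)
{
	struct rte_eventdev *eventdev;

	eventdev = rte_event_pmd_allocate(name, socket_id);
	if (eventdev == NULL)
		return -ENOMEM;

	eventdev->dev_ops = ops;
	/* a real driver would also set its fast-path function pointers
	 * (enqueue/dequeue) and allocate dev_private here
	 */
	return 0;
}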
1381 rte_event_pmd_release(struct rte_eventdev *eventdev)
1384 char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1385 const struct rte_memzone *mz;
1387 if (eventdev == NULL)
1390 eventdev->attached = RTE_EVENTDEV_DETACHED;
1391 eventdev_globals.nb_devs--;
1393 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1394 rte_free(eventdev->data->dev_private);
1396 /* Generate memzone name */
1397 ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1398 eventdev->data->dev_id);
1399 if (ret >= (int)sizeof(mz_name))
1402 mz = rte_memzone_lookup(mz_name);
1406 ret = rte_memzone_free(mz);
1411 eventdev->data = NULL;