1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Cavium, Inc
13 #include <sys/types.h>
14 #include <sys/queue.h>
16 #include <rte_string_fns.h>
17 #include <rte_byteorder.h>
19 #include <rte_debug.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
25 #include <rte_per_lcore.h>
26 #include <rte_lcore.h>
27 #include <rte_atomic.h>
28 #include <rte_branch_prediction.h>
29 #include <rte_common.h>
30 #include <rte_malloc.h>
31 #include <rte_errno.h>
32 #include <rte_ethdev.h>
33 #include <rte_cryptodev.h>
34 #include <rte_cryptodev_pmd.h>
36 #include "rte_eventdev.h"
37 #include "rte_eventdev_pmd.h"
39 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
41 struct rte_eventdev *rte_eventdevs = rte_event_devices;
43 static struct rte_eventdev_global eventdev_globals = {
47 /* Event dev northbound API implementation */
50 rte_event_dev_count(void)
52 return eventdev_globals.nb_devs;
56 rte_event_dev_get_dev_id(const char *name)
64 for (i = 0; i < eventdev_globals.nb_devs; i++) {
65 cmp = (strncmp(rte_event_devices[i].data->name, name,
66 RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
67 (rte_event_devices[i].dev ? (strncmp(
68 rte_event_devices[i].dev->driver->name, name,
69 RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
70 if (cmp && (rte_event_devices[i].attached ==
71 RTE_EVENTDEV_ATTACHED))
78 rte_event_dev_socket_id(uint8_t dev_id)
80 struct rte_eventdev *dev;
82 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
83 dev = &rte_eventdevs[dev_id];
85 return dev->data->socket_id;
89 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
91 struct rte_eventdev *dev;
93 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
94 dev = &rte_eventdevs[dev_id];
99 memset(dev_info, 0, sizeof(struct rte_event_dev_info));
101 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
102 (*dev->dev_ops->dev_infos_get)(dev, dev_info);
104 dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
106 dev_info->dev = dev->dev;
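
/*
 * Editor's illustrative sketch (not part of the original source; kept under
 * #if 0 so it is never compiled into the library): how an application might
 * enumerate the probed event devices and query their capabilities. The
 * function name is hypothetical.
 */
#if 0
static int
example_dump_eventdev_info(void)
{
	struct rte_event_dev_info info;
	uint8_t dev_id;

	for (dev_id = 0; dev_id < rte_event_dev_count(); dev_id++) {
		if (rte_event_dev_info_get(dev_id, &info) < 0)
			continue;
		printf("eventdev %u: driver %s, max queues %u, max ports %u\n",
		       (unsigned int)dev_id, info.driver_name,
		       (unsigned int)info.max_event_queues,
		       (unsigned int)info.max_event_ports);
	}
	return 0;
}
#endif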
111 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
114 struct rte_eventdev *dev;
116 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
117 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
119 dev = &rte_eventdevs[dev_id];
125 return dev->dev_ops->eth_rx_adapter_caps_get ?
126 (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
127 &rte_eth_devices[eth_port_id],
133 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
135 struct rte_eventdev *dev;
136 const struct rte_event_timer_adapter_ops *ops;
138 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
140 dev = &rte_eventdevs[dev_id];
146 return dev->dev_ops->timer_adapter_caps_get ?
147 (*dev->dev_ops->timer_adapter_caps_get)(dev,
155 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
158 struct rte_eventdev *dev;
159 struct rte_cryptodev *cdev;
161 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
162 if (!rte_cryptodev_pmd_is_valid_dev(cdev_id))
165 dev = &rte_eventdevs[dev_id];
166 cdev = rte_cryptodev_pmd_get_dev(cdev_id);
172 return dev->dev_ops->crypto_adapter_caps_get ?
173 (*dev->dev_ops->crypto_adapter_caps_get)
174 (dev, cdev, caps) : -ENOTSUP;
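
/*
 * Editor's illustrative sketch (not part of the original source; #if 0 so it
 * never compiles): using the adapter capability queries above to decide
 * whether an Rx adapter for a given ethdev port needs a software service.
 * The helper name is hypothetical.
 */
#if 0
static int
example_rx_adapter_needs_service(uint8_t dev_id, uint16_t eth_port_id)
{
	uint32_t caps = 0;

	if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps) < 0)
		return -1;

	/* no internal port capability: the adapter must run as a service */
	return !(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
}
#endif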
178 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
181 struct rte_eventdev *dev;
182 struct rte_eth_dev *eth_dev;
184 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
185 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
187 dev = &rte_eventdevs[dev_id];
188 eth_dev = &rte_eth_devices[eth_port_id];
195 return dev->dev_ops->eth_tx_adapter_caps_get ?
196 (*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
203 rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
205 uint8_t old_nb_queues = dev->data->nb_queues;
206 struct rte_event_queue_conf *queues_cfg;
209 RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
212 /* First time configuration */
213 if (dev->data->queues_cfg == NULL && nb_queues != 0) {
214 /* Allocate memory to store queue configuration */
215 dev->data->queues_cfg = rte_zmalloc_socket(
216 "eventdev->data->queues_cfg",
217 sizeof(dev->data->queues_cfg[0]) * nb_queues,
218 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
219 if (dev->data->queues_cfg == NULL) {
220 dev->data->nb_queues = 0;
221 RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
222 " nb_queues %u", nb_queues);
226 } else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
227 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
229 for (i = nb_queues; i < old_nb_queues; i++)
230 (*dev->dev_ops->queue_release)(dev, i);
232 /* Reallocate memory to store queue configuration */
233 queues_cfg = dev->data->queues_cfg;
234 queues_cfg = rte_realloc(queues_cfg,
235 sizeof(queues_cfg[0]) * nb_queues,
236 RTE_CACHE_LINE_SIZE);
237 if (queues_cfg == NULL) {
238 RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
239 " nb_queues %u", nb_queues);
242 dev->data->queues_cfg = queues_cfg;
244 if (nb_queues > old_nb_queues) {
245 uint8_t new_qs = nb_queues - old_nb_queues;
247 memset(queues_cfg + old_nb_queues, 0,
248 sizeof(queues_cfg[0]) * new_qs);
250 } else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
251 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
253 for (i = nb_queues; i < old_nb_queues; i++)
254 (*dev->dev_ops->queue_release)(dev, i);
257 dev->data->nb_queues = nb_queues;
261 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
264 rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
266 uint8_t old_nb_ports = dev->data->nb_ports;
269 struct rte_event_port_conf *ports_cfg;
272 RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
275 /* First time configuration */
276 if (dev->data->ports == NULL && nb_ports != 0) {
277 dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
278 sizeof(dev->data->ports[0]) * nb_ports,
279 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
280 if (dev->data->ports == NULL) {
281 dev->data->nb_ports = 0;
282 RTE_EDEV_LOG_ERR("failed to get mem for port metadata,"
283 " nb_ports %u", nb_ports);
287 /* Allocate memory to store port configurations */
288 dev->data->ports_cfg =
289 rte_zmalloc_socket("eventdev->ports_cfg",
290 sizeof(dev->data->ports_cfg[0]) * nb_ports,
291 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
292 if (dev->data->ports_cfg == NULL) {
293 dev->data->nb_ports = 0;
294 RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
295 " nb_ports %u", nb_ports);
299 /* Allocate memory to store queue to port link connection */
300 dev->data->links_map =
301 rte_zmalloc_socket("eventdev->links_map",
302 sizeof(dev->data->links_map[0]) * nb_ports *
303 RTE_EVENT_MAX_QUEUES_PER_DEV,
304 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
305 if (dev->data->links_map == NULL) {
306 dev->data->nb_ports = 0;
307 RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
308 " nb_ports %u", nb_ports);
311 for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
312 dev->data->links_map[i] =
313 EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
314 } else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
315 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
317 ports = dev->data->ports;
318 ports_cfg = dev->data->ports_cfg;
319 links_map = dev->data->links_map;
321 for (i = nb_ports; i < old_nb_ports; i++)
322 (*dev->dev_ops->port_release)(ports[i]);
324 /* Realloc memory for ports */
325 ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
326 RTE_CACHE_LINE_SIZE);
328 RTE_EDEV_LOG_ERR("failed to realloc port metadata,"
329 " nb_ports %u", nb_ports);
333 /* Realloc memory for ports_cfg */
334 ports_cfg = rte_realloc(ports_cfg,
335 sizeof(ports_cfg[0]) * nb_ports,
336 RTE_CACHE_LINE_SIZE);
337 if (ports_cfg == NULL) {
338 RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
339 " nb_ports %u", nb_ports);
343 /* Realloc memory to store queue to port link connection */
344 links_map = rte_realloc(links_map,
345 sizeof(dev->data->links_map[0]) * nb_ports *
346 RTE_EVENT_MAX_QUEUES_PER_DEV,
347 RTE_CACHE_LINE_SIZE);
348 if (links_map == NULL) {
349 dev->data->nb_ports = 0;
350 RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
351 " nb_ports %u", nb_ports);
355 if (nb_ports > old_nb_ports) {
356 uint8_t new_ps = nb_ports - old_nb_ports;
357 unsigned int old_links_map_end =
358 old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
359 unsigned int links_map_end =
360 nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
362 memset(ports + old_nb_ports, 0,
363 sizeof(ports[0]) * new_ps);
364 memset(ports_cfg + old_nb_ports, 0,
365 sizeof(ports_cfg[0]) * new_ps);
366 for (i = old_links_map_end; i < links_map_end; i++)
368 EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
371 dev->data->ports = ports;
372 dev->data->ports_cfg = ports_cfg;
373 dev->data->links_map = links_map;
374 } else if (dev->data->ports != NULL && nb_ports == 0) {
375 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
377 ports = dev->data->ports;
378 for (i = nb_ports; i < old_nb_ports; i++)
379 (*dev->dev_ops->port_release)(ports[i]);
382 dev->data->nb_ports = nb_ports;
387 rte_event_dev_configure(uint8_t dev_id,
388 const struct rte_event_dev_config *dev_conf)
390 struct rte_eventdev *dev;
391 struct rte_event_dev_info info;
394 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
395 dev = &rte_eventdevs[dev_id];
397 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
398 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
400 if (dev->data->dev_started) {
402 "device %d must be stopped to allow configuration", dev_id);
406 if (dev_conf == NULL)
409 (*dev->dev_ops->dev_infos_get)(dev, &info);
411 /* Check that dequeue_timeout_ns is within the supported range */
412 if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
413 if (dev_conf->dequeue_timeout_ns &&
414 (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
415 || dev_conf->dequeue_timeout_ns >
416 info.max_dequeue_timeout_ns)) {
417 RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
418 " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
419 dev_id, dev_conf->dequeue_timeout_ns,
420 info.min_dequeue_timeout_ns,
421 info.max_dequeue_timeout_ns);
426 /* Check that nb_events_limit is within the supported range */
427 if (dev_conf->nb_events_limit > info.max_num_events) {
428 RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
429 dev_id, dev_conf->nb_events_limit, info.max_num_events);
433 /* Check that nb_event_queues is within the supported range */
434 if (!dev_conf->nb_event_queues) {
435 RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
439 if (dev_conf->nb_event_queues > info.max_event_queues) {
440 RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d",
441 dev_id, dev_conf->nb_event_queues, info.max_event_queues);
445 /* Check that nb_event_ports is within the supported range */
446 if (!dev_conf->nb_event_ports) {
447 RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
450 if (dev_conf->nb_event_ports > info.max_event_ports) {
451 RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d",
452 dev_id, dev_conf->nb_event_ports, info.max_event_ports);
456 /* Check that nb_event_queue_flows is within the supported range */
457 if (!dev_conf->nb_event_queue_flows) {
458 RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
461 if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
462 RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
463 dev_id, dev_conf->nb_event_queue_flows,
464 info.max_event_queue_flows);
468 /* Check that nb_event_port_dequeue_depth is within the supported range */
469 if (!dev_conf->nb_event_port_dequeue_depth) {
470 RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
474 if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
475 (dev_conf->nb_event_port_dequeue_depth >
476 info.max_event_port_dequeue_depth)) {
477 RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
478 dev_id, dev_conf->nb_event_port_dequeue_depth,
479 info.max_event_port_dequeue_depth);
483 /* Check that nb_event_port_enqueue_depth is within the supported range */
484 if (!dev_conf->nb_event_port_enqueue_depth) {
485 RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
489 if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
490 (dev_conf->nb_event_port_enqueue_depth >
491 info.max_event_port_enqueue_depth)) {
492 RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
493 dev_id, dev_conf->nb_event_port_enqueue_depth,
494 info.max_event_port_enqueue_depth);
498 /* Copy the dev_conf parameter into the dev structure */
499 memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
501 /* Setup new number of queues and reconfigure device. */
502 diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
504 RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
509 /* Setup new number of ports and reconfigure device. */
510 diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
512 rte_event_dev_queue_config(dev, 0);
513 RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
518 /* Configure the device */
519 diag = (*dev->dev_ops->dev_configure)(dev);
521 RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
522 rte_event_dev_queue_config(dev, 0);
523 rte_event_dev_port_config(dev, 0);
526 dev->data->event_dev_cap = info.event_dev_cap;
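
/*
 * Editor's illustrative sketch (not part of the original source; #if 0 so it
 * never compiles): filling struct rte_event_dev_config from the advertised
 * device limits before calling rte_event_dev_configure(). The queue and port
 * counts are arbitrary example values; the helper name is hypothetical.
 */
#if 0
static int
example_configure_eventdev(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config cfg;
	int ret;

	ret = rte_event_dev_info_get(dev_id, &info);
	if (ret < 0)
		return ret;

	memset(&cfg, 0, sizeof(cfg));
	cfg.nb_event_queues = 2;
	cfg.nb_event_ports = 2;
	cfg.nb_events_limit = info.max_num_events;
	cfg.nb_event_queue_flows = info.max_event_queue_flows;
	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;

	return rte_event_dev_configure(dev_id, &cfg);
}
#endif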
531 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
533 if (queue_id < dev->data->nb_queues && queue_id <
534 RTE_EVENT_MAX_QUEUES_PER_DEV)
541 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
542 struct rte_event_queue_conf *queue_conf)
544 struct rte_eventdev *dev;
546 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
547 dev = &rte_eventdevs[dev_id];
549 if (queue_conf == NULL)
552 if (!is_valid_queue(dev, queue_id)) {
553 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
557 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
558 memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
559 (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
564 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
567 !(queue_conf->event_queue_cfg &
568 RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
569 ((queue_conf->event_queue_cfg &
570 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
571 (queue_conf->schedule_type
572 == RTE_SCHED_TYPE_ATOMIC)
580 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
583 !(queue_conf->event_queue_cfg &
584 RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
585 ((queue_conf->event_queue_cfg &
586 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
587 (queue_conf->schedule_type
588 == RTE_SCHED_TYPE_ORDERED)
597 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
598 const struct rte_event_queue_conf *queue_conf)
600 struct rte_eventdev *dev;
601 struct rte_event_queue_conf def_conf;
603 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
604 dev = &rte_eventdevs[dev_id];
606 if (!is_valid_queue(dev, queue_id)) {
607 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
611 /* Check nb_atomic_flows limit */
612 if (is_valid_atomic_queue_conf(queue_conf)) {
613 if (queue_conf->nb_atomic_flows == 0 ||
614 queue_conf->nb_atomic_flows >
615 dev->data->dev_conf.nb_event_queue_flows) {
617 "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
618 dev_id, queue_id, queue_conf->nb_atomic_flows,
619 dev->data->dev_conf.nb_event_queue_flows);
624 /* Check nb_atomic_order_sequences limit */
625 if (is_valid_ordered_queue_conf(queue_conf)) {
626 if (queue_conf->nb_atomic_order_sequences == 0 ||
627 queue_conf->nb_atomic_order_sequences >
628 dev->data->dev_conf.nb_event_queue_flows) {
630 "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
631 dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
632 dev->data->dev_conf.nb_event_queue_flows);
637 if (dev->data->dev_started) {
639 "device %d must be stopped to allow queue setup", dev_id);
643 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
645 if (queue_conf == NULL) {
646 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
648 (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
649 queue_conf = &def_conf;
652 dev->data->queues_cfg[queue_id] = *queue_conf;
653 return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
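
/*
 * Editor's illustrative sketch (not part of the original source; #if 0 so it
 * never compiles): start from the driver's default queue configuration and
 * request atomic scheduling. Passing NULL as queue_conf would use the
 * defaults unchanged. The helper name is hypothetical.
 */
#if 0
static int
example_setup_atomic_queue(uint8_t dev_id, uint8_t queue_id)
{
	struct rte_event_queue_conf conf;
	int ret;

	ret = rte_event_queue_default_conf_get(dev_id, queue_id, &conf);
	if (ret < 0)
		return ret;

	conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
	conf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	return rte_event_queue_setup(dev_id, queue_id, &conf);
}
#endif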
657 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
659 if (port_id < dev->data->nb_ports)
666 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
667 struct rte_event_port_conf *port_conf)
669 struct rte_eventdev *dev;
671 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
672 dev = &rte_eventdevs[dev_id];
674 if (port_conf == NULL)
677 if (!is_valid_port(dev, port_id)) {
678 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
682 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
683 memset(port_conf, 0, sizeof(struct rte_event_port_conf));
684 (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
689 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
690 const struct rte_event_port_conf *port_conf)
692 struct rte_eventdev *dev;
693 struct rte_event_port_conf def_conf;
696 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
697 dev = &rte_eventdevs[dev_id];
699 if (!is_valid_port(dev, port_id)) {
700 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
704 /* Check new_event_threshold limit */
705 if ((port_conf && !port_conf->new_event_threshold) ||
706 (port_conf && port_conf->new_event_threshold >
707 dev->data->dev_conf.nb_events_limit)) {
709 "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
710 dev_id, port_id, port_conf->new_event_threshold,
711 dev->data->dev_conf.nb_events_limit);
715 /* Check dequeue_depth limit */
716 if ((port_conf && !port_conf->dequeue_depth) ||
717 (port_conf && port_conf->dequeue_depth >
718 dev->data->dev_conf.nb_event_port_dequeue_depth)) {
720 "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
721 dev_id, port_id, port_conf->dequeue_depth,
722 dev->data->dev_conf.nb_event_port_dequeue_depth);
726 /* Check enqueue_depth limit */
727 if ((port_conf && !port_conf->enqueue_depth) ||
728 (port_conf && port_conf->enqueue_depth >
729 dev->data->dev_conf.nb_event_port_enqueue_depth)) {
731 "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
732 dev_id, port_id, port_conf->enqueue_depth,
733 dev->data->dev_conf.nb_event_port_enqueue_depth);
737 if (port_conf && port_conf->disable_implicit_release &&
738 !(dev->data->event_dev_cap &
739 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
741 "dev%d port%d Implicit release disable not supported",
746 if (dev->data->dev_started) {
748 "device %d must be stopped to allow port setup", dev_id);
752 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
754 if (port_conf == NULL) {
755 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
757 (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
758 port_conf = &def_conf;
761 dev->data->ports_cfg[port_id] = *port_conf;
763 diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
765 /* Unlink all the queues from this port (default state after setup) */
767 diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
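
/*
 * Editor's illustrative sketch (not part of the original source; #if 0 so it
 * never compiles): set up a port from the driver defaults. As the code above
 * shows, a freshly set-up port has no queues linked to it. The helper name
 * is hypothetical.
 */
#if 0
static int
example_setup_port(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event_port_conf conf;
	int ret;

	ret = rte_event_port_default_conf_get(dev_id, port_id, &conf);
	if (ret < 0)
		return ret;

	/* NULL instead of &conf would also select the driver defaults */
	return rte_event_port_setup(dev_id, port_id, &conf);
}
#endif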
776 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
777 uint32_t *attr_value)
779 struct rte_eventdev *dev;
783 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
784 dev = &rte_eventdevs[dev_id];
787 case RTE_EVENT_DEV_ATTR_PORT_COUNT:
788 *attr_value = dev->data->nb_ports;
790 case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
791 *attr_value = dev->data->nb_queues;
793 case RTE_EVENT_DEV_ATTR_STARTED:
794 *attr_value = dev->data->dev_started;
804 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
805 uint32_t *attr_value)
807 struct rte_eventdev *dev;
812 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
813 dev = &rte_eventdevs[dev_id];
814 if (!is_valid_port(dev, port_id)) {
815 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
820 case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
821 *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
823 case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
824 *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
826 case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
827 *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
836 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
837 uint32_t *attr_value)
839 struct rte_event_queue_conf *conf;
840 struct rte_eventdev *dev;
845 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
846 dev = &rte_eventdevs[dev_id];
847 if (!is_valid_queue(dev, queue_id)) {
848 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
852 conf = &dev->data->queues_cfg[queue_id];
855 case RTE_EVENT_QUEUE_ATTR_PRIORITY:
856 *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
857 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
858 *attr_value = conf->priority;
860 case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
861 *attr_value = conf->nb_atomic_flows;
863 case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
864 *attr_value = conf->nb_atomic_order_sequences;
866 case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
867 *attr_value = conf->event_queue_cfg;
869 case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
870 if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
873 *attr_value = conf->schedule_type;
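
/*
 * Editor's illustrative sketch (not part of the original source; #if 0 so it
 * never compiles): read a single queue attribute; the local value keeps its
 * default if the call fails. The helper name is hypothetical.
 */
#if 0
static uint32_t
example_queue_priority(uint8_t dev_id, uint8_t queue_id)
{
	uint32_t prio = RTE_EVENT_DEV_PRIORITY_NORMAL;

	(void)rte_event_queue_attr_get(dev_id, queue_id,
			RTE_EVENT_QUEUE_ATTR_PRIORITY, &prio);
	return prio;
}
#endif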
882 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
883 const uint8_t queues[], const uint8_t priorities[],
886 struct rte_eventdev *dev;
887 uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
888 uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
892 RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
893 dev = &rte_eventdevs[dev_id];
895 if (*dev->dev_ops->port_link == NULL) {
896 RTE_EDEV_LOG_ERR("Function not supported");
901 if (!is_valid_port(dev, port_id)) {
902 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
907 if (queues == NULL) {
908 for (i = 0; i < dev->data->nb_queues; i++)
911 queues = queues_list;
912 nb_links = dev->data->nb_queues;
915 if (priorities == NULL) {
916 for (i = 0; i < nb_links; i++)
917 priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
919 priorities = priorities_list;
922 for (i = 0; i < nb_links; i++)
923 if (queues[i] >= dev->data->nb_queues) {
928 diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
929 queues, priorities, nb_links);
933 links_map = dev->data->links_map;
934 /* Point links_map to this port specific area */
935 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
936 for (i = 0; i < diag; i++)
937 links_map[queues[i]] = (uint8_t)priorities[i];
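
/*
 * Editor's illustrative sketch (not part of the original source; #if 0 so it
 * never compiles): link a port to two queues with explicit priorities.
 * Passing NULL queues/priorities would link every configured queue at normal
 * priority. The helper name is hypothetical.
 */
#if 0
static int
example_link_port(uint8_t dev_id, uint8_t port_id)
{
	uint8_t queues[] = { 0, 1 };
	uint8_t priorities[] = {
		RTE_EVENT_DEV_PRIORITY_HIGHEST,
		RTE_EVENT_DEV_PRIORITY_NORMAL,
	};
	int nb_linked;

	nb_linked = rte_event_port_link(dev_id, port_id, queues, priorities,
					RTE_DIM(queues));
	/* a partial link reports the count linked and sets rte_errno */
	return nb_linked == (int)RTE_DIM(queues) ? 0 : -rte_errno;
}
#endif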
943 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
944 uint8_t queues[], uint16_t nb_unlinks)
946 struct rte_eventdev *dev;
947 uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
951 RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
952 dev = &rte_eventdevs[dev_id];
954 if (*dev->dev_ops->port_unlink == NULL) {
955 RTE_EDEV_LOG_ERR("Function not supported");
960 if (!is_valid_port(dev, port_id)) {
961 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
966 links_map = dev->data->links_map;
967 /* Point links_map to this port specific area */
968 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
970 if (queues == NULL) {
972 for (i = 0; i < dev->data->nb_queues; i++) {
974 EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
981 for (j = 0; j < nb_unlinks; j++) {
982 if (links_map[queues[j]] ==
983 EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
989 for (i = 0; i < nb_unlinks; i++)
990 if (queues[i] >= dev->data->nb_queues) {
995 diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
1001 for (i = 0; i < diag; i++)
1002 links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1008 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
1010 struct rte_eventdev *dev;
1012 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1013 dev = &rte_eventdevs[dev_id];
1014 if (!is_valid_port(dev, port_id)) {
1015 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1019 /* Return 0 if the PMD does not implement unlinks in progress.
1020 * This allows PMDs which handle unlink synchronously to not implement
1021 * this function at all.
1023 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);
1025 return (*dev->dev_ops->port_unlinks_in_progress)(dev,
1026 dev->data->ports[port_id]);
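
/*
 * Editor's illustrative sketch (not part of the original source; #if 0 so it
 * never compiles, and assuming <rte_pause.h> for rte_pause()): one way to
 * use the query above is to busy-wait until a previously requested unlink
 * has completed. The helper name is hypothetical.
 */
#if 0
static void
example_wait_for_unlinks(uint8_t dev_id, uint8_t port_id)
{
	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
		rte_pause();
}
#endif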
1030 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1031 uint8_t queues[], uint8_t priorities[])
1033 struct rte_eventdev *dev;
1034 uint16_t *links_map;
1037 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1038 dev = &rte_eventdevs[dev_id];
1039 if (!is_valid_port(dev, port_id)) {
1040 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1044 links_map = dev->data->links_map;
1045 /* Point links_map to this port specific area */
1046 links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1047 for (i = 0; i < dev->data->nb_queues; i++) {
1048 if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1050 priorities[count] = (uint8_t)links_map[i];
1058 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1059 uint64_t *timeout_ticks)
1061 struct rte_eventdev *dev;
1063 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1064 dev = &rte_eventdevs[dev_id];
1065 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
1067 if (timeout_ticks == NULL)
1070 return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
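
/*
 * Editor's illustrative sketch (not part of the original source; #if 0 so it
 * never compiles): convert a wall-clock dequeue timeout into the
 * device-specific tick value expected by the dequeue path. The helper name
 * and the 100 us value are arbitrary.
 */
#if 0
static int
example_dequeue_timeout_ticks(uint8_t dev_id, uint64_t *ticks)
{
	return rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, ticks);
}
#endif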
1074 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
1076 struct rte_eventdev *dev;
1078 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1079 dev = &rte_eventdevs[dev_id];
1081 if (service_id == NULL)
1084 if (dev->data->service_inited)
1085 *service_id = dev->data->service_id;
1087 return dev->data->service_inited ? 0 : -ESRCH;
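
/*
 * Editor's illustrative sketch (not part of the original source; #if 0 so it
 * never compiles, and assuming <rte_service.h>): software event devices
 * expose their scheduler as a service; map it to a service lcore and enable
 * it. A -ESRCH return above means no service is needed. The helper name is
 * hypothetical.
 */
#if 0
static int
example_map_sched_service(uint8_t dev_id, uint32_t service_lcore)
{
	uint32_t service_id;
	int ret;

	ret = rte_event_dev_service_id_get(dev_id, &service_id);
	if (ret == -ESRCH)
		return 0;
	if (ret < 0)
		return ret;

	ret = rte_service_map_lcore_set(service_id, service_lcore, 1);
	if (ret < 0)
		return ret;
	return rte_service_runstate_set(service_id, 1);
}
#endif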
1091 rte_event_dev_dump(uint8_t dev_id, FILE *f)
1093 struct rte_eventdev *dev;
1095 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1096 dev = &rte_eventdevs[dev_id];
1097 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
1099 (*dev->dev_ops->dump)(dev, f);
1105 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1106 uint8_t queue_port_id)
1108 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1109 if (dev->dev_ops->xstats_get_names != NULL)
1110 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1117 rte_event_dev_xstats_names_get(uint8_t dev_id,
1118 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
1119 struct rte_event_dev_xstats_name *xstats_names,
1120 unsigned int *ids, unsigned int size)
1122 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1123 const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1125 if (xstats_names == NULL || cnt_expected_entries < 0 ||
1126 (int)size < cnt_expected_entries)
1127 return cnt_expected_entries;
1129 /* dev_id checked above */
1130 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1132 if (dev->dev_ops->xstats_get_names != NULL)
1133 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1134 queue_port_id, xstats_names, ids, size);
1139 /* retrieve eventdev extended statistics */
1141 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1142 uint8_t queue_port_id, const unsigned int ids[],
1143 uint64_t values[], unsigned int n)
1145 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1146 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1148 /* implemented by the driver */
1149 if (dev->dev_ops->xstats_get != NULL)
1150 return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1156 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1159 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1160 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1161 unsigned int temp = -1;
1164 *id = (unsigned int)-1;
1166 id = &temp; /* ensure driver never gets a NULL value */
1168 /* implemented by driver */
1169 if (dev->dev_ops->xstats_get_by_name != NULL)
1170 return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
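
/*
 * Editor's illustrative sketch (not part of the original source; #if 0 so it
 * never compiles): read one extended statistic by name; per the check above,
 * an invalid device simply yields 0. The helper name is hypothetical.
 */
#if 0
static uint64_t
example_read_xstat(uint8_t dev_id, const char *stat_name)
{
	unsigned int id;

	return rte_event_dev_xstats_by_name_get(dev_id, stat_name, &id);
}
#endif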
1174 int rte_event_dev_xstats_reset(uint8_t dev_id,
1175 enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1176 const uint32_t ids[], uint32_t nb_ids)
1178 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1179 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1181 if (dev->dev_ops->xstats_reset != NULL)
1182 return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1187 int rte_event_dev_selftest(uint8_t dev_id)
1189 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1190 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1192 if (dev->dev_ops->dev_selftest != NULL)
1193 return (*dev->dev_ops->dev_selftest)();
1198 rte_event_dev_start(uint8_t dev_id)
1200 struct rte_eventdev *dev;
1203 RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1205 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1206 dev = &rte_eventdevs[dev_id];
1207 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1209 if (dev->data->dev_started != 0) {
1210 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1215 diag = (*dev->dev_ops->dev_start)(dev);
1217 dev->data->dev_started = 1;
1225 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1226 eventdev_stop_flush_t callback, void *userdata)
1228 struct rte_eventdev *dev;
1230 RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
1232 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1233 dev = &rte_eventdevs[dev_id];
1235 dev->dev_ops->dev_stop_flush = callback;
1236 dev->data->dev_stop_flush_arg = userdata;
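
/*
 * Editor's illustrative sketch (not part of the original source; #if 0 so it
 * never compiles, assuming <rte_mbuf.h> and that every event carries an
 * mbuf): a stop-flush callback that releases events still held by the
 * device when it is stopped. Names are hypothetical.
 */
#if 0
static void
example_stop_flush_cb(uint8_t dev_id __rte_unused, struct rte_event ev,
		void *arg __rte_unused)
{
	rte_pktmbuf_free(ev.mbuf);
}

static int
example_register_stop_flush(uint8_t dev_id)
{
	return rte_event_dev_stop_flush_callback_register(dev_id,
			example_stop_flush_cb, NULL);
}
#endif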
1242 rte_event_dev_stop(uint8_t dev_id)
1244 struct rte_eventdev *dev;
1246 RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1248 RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1249 dev = &rte_eventdevs[dev_id];
1250 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1252 if (dev->data->dev_started == 0) {
1253 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1258 dev->data->dev_started = 0;
1259 (*dev->dev_ops->dev_stop)(dev);
1263 rte_event_dev_close(uint8_t dev_id)
1265 struct rte_eventdev *dev;
1267 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1268 dev = &rte_eventdevs[dev_id];
1269 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1271 /* Device must be stopped before it can be closed */
1272 if (dev->data->dev_started == 1) {
1273 RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1278 return (*dev->dev_ops->dev_close)(dev);
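
/*
 * Editor's illustrative sketch (not part of the original source; #if 0 so it
 * never compiles): the teardown order enforced above, stop first, then
 * close. The helper name is hypothetical.
 */
#if 0
static int
example_teardown(uint8_t dev_id)
{
	rte_event_dev_stop(dev_id);
	return rte_event_dev_close(dev_id);
}
#endif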
1282 rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1285 char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1286 const struct rte_memzone *mz;
1289 /* Generate memzone name */
1290 n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1291 if (n >= (int)sizeof(mz_name))
1294 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1295 mz = rte_memzone_reserve(mz_name,
1296 sizeof(struct rte_eventdev_data),
1299 mz = rte_memzone_lookup(mz_name);
1305 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1306 memset(*data, 0, sizeof(struct rte_eventdev_data));
1311 static inline uint8_t
1312 rte_eventdev_find_free_device_index(void)
1316 for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1317 if (rte_eventdevs[dev_id].attached ==
1318 RTE_EVENTDEV_DETACHED)
1321 return RTE_EVENT_MAX_DEVS;
1325 rte_event_tx_adapter_enqueue(__rte_unused void *port,
1326 __rte_unused struct rte_event ev[],
1327 __rte_unused uint16_t nb_events)
1329 rte_errno = ENOTSUP;
1333 struct rte_eventdev *
1334 rte_event_pmd_allocate(const char *name, int socket_id)
1336 struct rte_eventdev *eventdev;
1339 if (rte_event_pmd_get_named_dev(name) != NULL) {
1340 RTE_EDEV_LOG_ERR("Event device with name %s already "
1341 "allocated!", name);
1345 dev_id = rte_eventdev_find_free_device_index();
1346 if (dev_id == RTE_EVENT_MAX_DEVS) {
1347 RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1351 eventdev = &rte_eventdevs[dev_id];
1353 eventdev->txa_enqueue = rte_event_tx_adapter_enqueue;
1354 eventdev->txa_enqueue_same_dest = rte_event_tx_adapter_enqueue;
1356 if (eventdev->data == NULL) {
1357 struct rte_eventdev_data *eventdev_data = NULL;
1359 int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
1362 if (retval < 0 || eventdev_data == NULL)
1365 eventdev->data = eventdev_data;
1367 strlcpy(eventdev->data->name, name, RTE_EVENTDEV_NAME_MAX_LEN);
1369 eventdev->data->dev_id = dev_id;
1370 eventdev->data->socket_id = socket_id;
1371 eventdev->data->dev_started = 0;
1373 eventdev->attached = RTE_EVENTDEV_ATTACHED;
1375 eventdev_globals.nb_devs++;
1382 rte_event_pmd_release(struct rte_eventdev *eventdev)
1385 char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1386 const struct rte_memzone *mz;
1388 if (eventdev == NULL)
1391 eventdev->attached = RTE_EVENTDEV_DETACHED;
1392 eventdev_globals.nb_devs--;
1394 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1395 rte_free(eventdev->data->dev_private);
1397 /* Generate memzone name */
1398 ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1399 eventdev->data->dev_id);
1400 if (ret >= (int)sizeof(mz_name))
1403 mz = rte_memzone_lookup(mz_name);
1407 ret = rte_memzone_free(mz);
1412 eventdev->data = NULL;