/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"

struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];

static struct rte_eventdev_global eventdev_globals = {
        .nb_devs                = 0
};

struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;

/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
        return rte_eventdev_globals->nb_devs;
}

int
rte_event_dev_get_dev_id(const char *name)
{
        int i;
        uint8_t cmp;

        if (!name)
                return -EINVAL;

        for (i = 0; i < rte_eventdev_globals->nb_devs; i++) {
                cmp = (strncmp(rte_event_devices[i].data->name, name,
                                RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
                        (rte_event_devices[i].dev ? (strncmp(
                                rte_event_devices[i].dev->driver->name, name,
                                RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
                if (cmp && (rte_event_devices[i].attached ==
                                RTE_EVENTDEV_ATTACHED))
                        return i;
        }
        return -ENODEV;
}

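/*
 * Usage sketch (illustrative, not part of the library): resolving a device
 * by name before any other call. The vdev name "event_sw0" is only an
 * example.
 *
 *      int id = rte_event_dev_get_dev_id("event_sw0");
 *
 *      if (id < 0)
 *              rte_exit(EXIT_FAILURE, "eventdev not found (%u attached)\n",
 *                              rte_event_dev_count());
 */
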
int
rte_event_dev_socket_id(uint8_t dev_id)
{
        struct rte_eventdev *dev;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];

        return dev->data->socket_id;
}

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
        struct rte_eventdev *dev;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];

        if (dev_info == NULL)
                return -EINVAL;

        memset(dev_info, 0, sizeof(struct rte_event_dev_info));

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        (*dev->dev_ops->dev_infos_get)(dev, dev_info);

        dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

        dev_info->dev = dev->dev;
        return 0;
}

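/*
 * Usage sketch (illustrative): the info struct reports the driver limits
 * that the configure/setup calls below validate against.
 *
 *      struct rte_event_dev_info info;
 *
 *      if (rte_event_dev_info_get(dev_id, &info) == 0)
 *              printf("%s: max queues %u, max ports %u\n",
 *                      info.driver_name, info.max_event_queues,
 *                      info.max_event_ports);
 */
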
int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
                                uint32_t *caps)
{
        struct rte_eventdev *dev;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

        dev = &rte_eventdevs[dev_id];

        if (caps == NULL)
                return -EINVAL;
        *caps = 0;

        return dev->dev_ops->eth_rx_adapter_caps_get ?
                        (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
                                        &rte_eth_devices[eth_port_id],
                                        caps)
                        : 0;
}

int __rte_experimental
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
{
        struct rte_eventdev *dev;
        const struct rte_event_timer_adapter_ops *ops;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

        dev = &rte_eventdevs[dev_id];

        if (caps == NULL)
                return -EINVAL;
        *caps = 0;

        return dev->dev_ops->timer_adapter_caps_get ?
                        (*dev->dev_ops->timer_adapter_caps_get)(dev, 0,
                                                        caps, &ops)
                        : 0;
}

int __rte_experimental
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
                                  uint32_t *caps)
{
        struct rte_eventdev *dev;
        struct rte_cryptodev *cdev;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        if (!rte_cryptodev_pmd_is_valid_dev(cdev_id))
                return -EINVAL;

        dev = &rte_eventdevs[dev_id];
        cdev = rte_cryptodev_pmd_get_dev(cdev_id);

        if (caps == NULL)
                return -EINVAL;
        *caps = 0;

        return dev->dev_ops->crypto_adapter_caps_get ?
                (*dev->dev_ops->crypto_adapter_caps_get)
                (dev, cdev, caps) : -ENOTSUP;
}

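/*
 * Usage sketch (illustrative): adapter capabilities are a bitmask; callers
 * branch on flags such as RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT to
 * decide whether a software service core is needed to move packets.
 *
 *      uint32_t caps = 0;
 *      int need_service = 0;
 *
 *      if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id,
 *                      &caps) == 0 &&
 *              !(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
 *              need_service = 1;
 */
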
static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
        uint8_t old_nb_queues = dev->data->nb_queues;
        struct rte_event_queue_conf *queues_cfg;
        unsigned int i;

        RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
                        dev->data->dev_id);

        /* First time configuration */
        if (dev->data->queues_cfg == NULL && nb_queues != 0) {
                /* Allocate memory to store queue configuration */
                dev->data->queues_cfg = rte_zmalloc_socket(
                                "eventdev->data->queues_cfg",
                                sizeof(dev->data->queues_cfg[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE, dev->data->socket_id);
                if (dev->data->queues_cfg == NULL) {
                        dev->data->nb_queues = 0;
                        RTE_EDEV_LOG_ERR("failed to get mem for queue cfg, nb_queues %u",
                                        nb_queues);
                        return -ENOMEM;
                }
        /* Re-configure */
        } else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->queue_release)(dev, i);

                /* Re-allocate memory to store queue configuration */
                queues_cfg = dev->data->queues_cfg;
                queues_cfg = rte_realloc(queues_cfg,
                                sizeof(queues_cfg[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (queues_cfg == NULL) {
                        RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory, nb_queues %u",
                                        nb_queues);
                        return -ENOMEM;
                }
                dev->data->queues_cfg = queues_cfg;

                if (nb_queues > old_nb_queues) {
                        uint8_t new_qs = nb_queues - old_nb_queues;

                        /* Zero the newly added tail of the config array */
                        memset(queues_cfg + old_nb_queues, 0,
                                sizeof(queues_cfg[0]) * new_qs);
                }
        } else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->queue_release)(dev, i);
        }

        dev->data->nb_queues = nb_queues;
        return 0;
}

#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)

static inline int
rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
        uint8_t old_nb_ports = dev->data->nb_ports;
        void **ports;
        uint16_t *links_map;
        struct rte_event_port_conf *ports_cfg;
        unsigned int i;

        RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
                        dev->data->dev_id);

        /* First time configuration */
        if (dev->data->ports == NULL && nb_ports != 0) {
                dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
                                sizeof(dev->data->ports[0]) * nb_ports,
                                RTE_CACHE_LINE_SIZE, dev->data->socket_id);
                if (dev->data->ports == NULL) {
                        dev->data->nb_ports = 0;
                        RTE_EDEV_LOG_ERR("failed to get mem for port meta data, nb_ports %u",
                                        nb_ports);
                        return -ENOMEM;
                }

                /* Allocate memory to store port configurations */
                dev->data->ports_cfg =
                        rte_zmalloc_socket("eventdev->ports_cfg",
                        sizeof(dev->data->ports_cfg[0]) * nb_ports,
                        RTE_CACHE_LINE_SIZE, dev->data->socket_id);
                if (dev->data->ports_cfg == NULL) {
                        dev->data->nb_ports = 0;
                        RTE_EDEV_LOG_ERR("failed to get mem for port cfg, nb_ports %u",
                                        nb_ports);
                        return -ENOMEM;
                }

                /* Allocate memory to store queue to port link connection */
                dev->data->links_map =
                        rte_zmalloc_socket("eventdev->links_map",
                        sizeof(dev->data->links_map[0]) * nb_ports *
                        RTE_EVENT_MAX_QUEUES_PER_DEV,
                        RTE_CACHE_LINE_SIZE, dev->data->socket_id);
                if (dev->data->links_map == NULL) {
                        dev->data->nb_ports = 0;
                        RTE_EDEV_LOG_ERR("failed to get mem for port_map area, nb_ports %u",
                                        nb_ports);
                        return -ENOMEM;
                }
                for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
                        dev->data->links_map[i] =
                                EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
        } else if (dev->data->ports != NULL && nb_ports != 0) { /* re-config */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

                ports = dev->data->ports;
                ports_cfg = dev->data->ports_cfg;
                links_map = dev->data->links_map;

                for (i = nb_ports; i < old_nb_ports; i++)
                        (*dev->dev_ops->port_release)(ports[i]);

                /* Realloc memory for ports */
                ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
                                RTE_CACHE_LINE_SIZE);
                if (ports == NULL) {
                        RTE_EDEV_LOG_ERR("failed to realloc port meta data, nb_ports %u",
                                        nb_ports);
                        return -ENOMEM;
                }

                /* Realloc memory for ports_cfg */
                ports_cfg = rte_realloc(ports_cfg,
                        sizeof(ports_cfg[0]) * nb_ports,
                        RTE_CACHE_LINE_SIZE);
                if (ports_cfg == NULL) {
                        RTE_EDEV_LOG_ERR("failed to realloc port cfg mem, nb_ports %u",
                                        nb_ports);
                        return -ENOMEM;
                }

                /* Realloc memory to store queue to port link connection */
                links_map = rte_realloc(links_map,
                        sizeof(dev->data->links_map[0]) * nb_ports *
                        RTE_EVENT_MAX_QUEUES_PER_DEV,
                        RTE_CACHE_LINE_SIZE);
                if (links_map == NULL) {
                        dev->data->nb_ports = 0;
                        RTE_EDEV_LOG_ERR("failed to realloc mem for port_map, nb_ports %u",
                                        nb_ports);
                        return -ENOMEM;
                }

                if (nb_ports > old_nb_ports) {
                        uint8_t new_ps = nb_ports - old_nb_ports;
                        unsigned int old_links_map_end =
                                old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
                        unsigned int links_map_end =
                                nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

                        memset(ports + old_nb_ports, 0,
                                sizeof(ports[0]) * new_ps);
                        memset(ports_cfg + old_nb_ports, 0,
                                sizeof(ports_cfg[0]) * new_ps);
                        for (i = old_links_map_end; i < links_map_end; i++)
                                links_map[i] =
                                        EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
                }

                dev->data->ports = ports;
                dev->data->ports_cfg = ports_cfg;
                dev->data->links_map = links_map;
        } else if (dev->data->ports != NULL && nb_ports == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

                ports = dev->data->ports;
                for (i = nb_ports; i < old_nb_ports; i++)
                        (*dev->dev_ops->port_release)(ports[i]);
        }

        dev->data->nb_ports = nb_ports;
        return 0;
}

int
rte_event_dev_configure(uint8_t dev_id,
                        const struct rte_event_dev_config *dev_conf)
{
        struct rte_eventdev *dev;
        struct rte_event_dev_info info;
        int diag;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_EDEV_LOG_ERR(
                    "device %d must be stopped to allow configuration", dev_id);
                return -EBUSY;
        }

        if (dev_conf == NULL)
                return -EINVAL;

        (*dev->dev_ops->dev_infos_get)(dev, &info);

        /* Check dequeue_timeout_ns value is in limit */
        if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
                if (dev_conf->dequeue_timeout_ns &&
                    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
                        || dev_conf->dequeue_timeout_ns >
                                info.max_dequeue_timeout_ns)) {
                        RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
                        " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
                        dev_id, dev_conf->dequeue_timeout_ns,
                        info.min_dequeue_timeout_ns,
                        info.max_dequeue_timeout_ns);
                        return -EINVAL;
                }
        }

        /* Check nb_events_limit is in limit */
        if (dev_conf->nb_events_limit > info.max_num_events) {
                RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
                dev_id, dev_conf->nb_events_limit, info.max_num_events);
                return -EINVAL;
        }

        /* Check nb_event_queues is in limit */
        if (!dev_conf->nb_event_queues) {
                RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
                                        dev_id);
                return -EINVAL;
        }
        if (dev_conf->nb_event_queues > info.max_event_queues) {
                RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d",
                dev_id, dev_conf->nb_event_queues, info.max_event_queues);
                return -EINVAL;
        }

        /* Check nb_event_ports is in limit */
        if (!dev_conf->nb_event_ports) {
                RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
                return -EINVAL;
        }
        if (dev_conf->nb_event_ports > info.max_event_ports) {
                RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d",
                dev_id, dev_conf->nb_event_ports, info.max_event_ports);
                return -EINVAL;
        }

        /* Check nb_event_queue_flows is in limit */
        if (!dev_conf->nb_event_queue_flows) {
                RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
                return -EINVAL;
        }
        if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
                RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
                dev_id, dev_conf->nb_event_queue_flows,
                info.max_event_queue_flows);
                return -EINVAL;
        }

        /* Check nb_event_port_dequeue_depth is in limit */
        if (!dev_conf->nb_event_port_dequeue_depth) {
                RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
                                        dev_id);
                return -EINVAL;
        }
        if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
                (dev_conf->nb_event_port_dequeue_depth >
                        info.max_event_port_dequeue_depth)) {
                RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
                dev_id, dev_conf->nb_event_port_dequeue_depth,
                info.max_event_port_dequeue_depth);
                return -EINVAL;
        }

        /* Check nb_event_port_enqueue_depth is in limit */
        if (!dev_conf->nb_event_port_enqueue_depth) {
                RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
                                        dev_id);
                return -EINVAL;
        }
        if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
                (dev_conf->nb_event_port_enqueue_depth >
                        info.max_event_port_enqueue_depth)) {
                RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
                dev_id, dev_conf->nb_event_port_enqueue_depth,
                info.max_event_port_enqueue_depth);
                return -EINVAL;
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /* Setup new number of queues and reconfigure device. */
        diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
        if (diag != 0) {
                RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
                                dev_id, diag);
                return diag;
        }

        /* Setup new number of ports and reconfigure device. */
        diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
        if (diag != 0) {
                rte_event_dev_queue_config(dev, 0);
                RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
                                dev_id, diag);
                return diag;
        }

        /* Configure the device */
        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
                rte_event_dev_queue_config(dev, 0);
                rte_event_dev_port_config(dev, 0);
        }

        dev->data->event_dev_cap = info.event_dev_cap;
        return diag;
}

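/*
 * Usage sketch (illustrative): derive a minimal configuration from the
 * advertised limits and apply it. All sizing choices here are examples.
 *
 *      struct rte_event_dev_config cfg = {
 *              .nb_event_queues = 1,
 *              .nb_event_ports = 1,
 *              .nb_events_limit = info.max_num_events,
 *              .nb_event_queue_flows = info.max_event_queue_flows,
 *              .nb_event_port_dequeue_depth =
 *                      info.max_event_port_dequeue_depth,
 *              .nb_event_port_enqueue_depth =
 *                      info.max_event_port_enqueue_depth,
 *              .dequeue_timeout_ns = info.min_dequeue_timeout_ns,
 *      };
 *
 *      if (rte_event_dev_configure(dev_id, &cfg) < 0)
 *              rte_exit(EXIT_FAILURE, "eventdev configure failed\n");
 */
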
static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
        if (queue_id < dev->data->nb_queues && queue_id <
                                RTE_EVENT_MAX_QUEUES_PER_DEV)
                return 1;
        else
                return 0;
}

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
                                 struct rte_event_queue_conf *queue_conf)
{
        struct rte_eventdev *dev;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];

        if (queue_conf == NULL)
                return -EINVAL;

        if (!is_valid_queue(dev, queue_id)) {
                RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
        memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
        (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
        return 0;
}

static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
        if (queue_conf &&
                !(queue_conf->event_queue_cfg &
                  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
                ((queue_conf->event_queue_cfg &
                        RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
                (queue_conf->schedule_type
                        == RTE_SCHED_TYPE_ATOMIC)
                ))
                return 1;
        else
                return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
        if (queue_conf &&
                !(queue_conf->event_queue_cfg &
                  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
                ((queue_conf->event_queue_cfg &
                        RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
                (queue_conf->schedule_type
                        == RTE_SCHED_TYPE_ORDERED)
                ))
                return 1;
        else
                return 0;
}

int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
                      const struct rte_event_queue_conf *queue_conf)
{
        struct rte_eventdev *dev;
        struct rte_event_queue_conf def_conf;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];

        if (!is_valid_queue(dev, queue_id)) {
                RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
                return -EINVAL;
        }

        /* Check nb_atomic_flows limit */
        if (is_valid_atomic_queue_conf(queue_conf)) {
                if (queue_conf->nb_atomic_flows == 0 ||
                    queue_conf->nb_atomic_flows >
                        dev->data->dev_conf.nb_event_queue_flows) {
                        RTE_EDEV_LOG_ERR(
                "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
                        dev_id, queue_id, queue_conf->nb_atomic_flows,
                        dev->data->dev_conf.nb_event_queue_flows);
                        return -EINVAL;
                }
        }

        /* Check nb_atomic_order_sequences limit */
        if (is_valid_ordered_queue_conf(queue_conf)) {
                if (queue_conf->nb_atomic_order_sequences == 0 ||
                    queue_conf->nb_atomic_order_sequences >
                        dev->data->dev_conf.nb_event_queue_flows) {
                        RTE_EDEV_LOG_ERR(
                "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
                        dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
                        dev->data->dev_conf.nb_event_queue_flows);
                        return -EINVAL;
                }
        }

        if (dev->data->dev_started) {
                RTE_EDEV_LOG_ERR(
                    "device %d must be stopped to allow queue setup", dev_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

        if (queue_conf == NULL) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
                                        -ENOTSUP);
                (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
                queue_conf = &def_conf;
        }

        dev->data->queues_cfg[queue_id] = *queue_conf;
        return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}

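/*
 * Usage sketch (illustrative): an atomic queue sized within the flow limit
 * negotiated at configure time; the flow count is an example value.
 *
 *      struct rte_event_queue_conf qconf = {
 *              .schedule_type = RTE_SCHED_TYPE_ATOMIC,
 *              .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *              .nb_atomic_flows = 1024,
 *      };
 *
 *      if (rte_event_queue_setup(dev_id, 0, &qconf) < 0)
 *              rte_exit(EXIT_FAILURE, "queue setup failed\n");
 */
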
static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
        if (port_id < dev->data->nb_ports)
                return 1;
        else
                return 0;
}

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
                                 struct rte_event_port_conf *port_conf)
{
        struct rte_eventdev *dev;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];

        if (port_conf == NULL)
                return -EINVAL;

        if (!is_valid_port(dev, port_id)) {
                RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
        memset(port_conf, 0, sizeof(struct rte_event_port_conf));
        (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
        return 0;
}

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
                     const struct rte_event_port_conf *port_conf)
{
        struct rte_eventdev *dev;
        struct rte_event_port_conf def_conf;
        int diag;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];

        if (!is_valid_port(dev, port_id)) {
                RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
                return -EINVAL;
        }

        /* Check new_event_threshold limit */
        if ((port_conf && !port_conf->new_event_threshold) ||
                        (port_conf && port_conf->new_event_threshold >
                                dev->data->dev_conf.nb_events_limit)) {
                RTE_EDEV_LOG_ERR(
                   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
                        dev_id, port_id, port_conf->new_event_threshold,
                        dev->data->dev_conf.nb_events_limit);
                return -EINVAL;
        }

        /* Check dequeue_depth limit */
        if ((port_conf && !port_conf->dequeue_depth) ||
                        (port_conf && port_conf->dequeue_depth >
                dev->data->dev_conf.nb_event_port_dequeue_depth)) {
                RTE_EDEV_LOG_ERR(
                   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
                        dev_id, port_id, port_conf->dequeue_depth,
                        dev->data->dev_conf.nb_event_port_dequeue_depth);
                return -EINVAL;
        }

        /* Check enqueue_depth limit */
        if ((port_conf && !port_conf->enqueue_depth) ||
                        (port_conf && port_conf->enqueue_depth >
                dev->data->dev_conf.nb_event_port_enqueue_depth)) {
                RTE_EDEV_LOG_ERR(
                   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
                        dev_id, port_id, port_conf->enqueue_depth,
                        dev->data->dev_conf.nb_event_port_enqueue_depth);
                return -EINVAL;
        }

        if (port_conf && port_conf->disable_implicit_release &&
            !(dev->data->event_dev_cap &
              RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
                RTE_EDEV_LOG_ERR(
                   "dev%d port%d Implicit release disable not supported",
                        dev_id, port_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_EDEV_LOG_ERR(
                    "device %d must be stopped to allow port setup", dev_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

        if (port_conf == NULL) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
                                        -ENOTSUP);
                (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
                port_conf = &def_conf;
        }

        dev->data->ports_cfg[port_id] = *port_conf;

        diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

        /* Unlink all the queues from this port (default state after setup) */
        if (!diag)
                diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

        if (diag < 0)
                return diag;

        return 0;
}

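/*
 * Usage sketch (illustrative): passing NULL applies the driver's default
 * port configuration, via the fallback shown above.
 *
 *      if (rte_event_port_setup(dev_id, 0, NULL) < 0)
 *              rte_exit(EXIT_FAILURE, "port setup failed\n");
 */
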
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
                       uint32_t *attr_value)
{
        struct rte_eventdev *dev;

        if (!attr_value)
                return -EINVAL;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];

        switch (attr_id) {
        case RTE_EVENT_DEV_ATTR_PORT_COUNT:
                *attr_value = dev->data->nb_ports;
                break;
        case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
                *attr_value = dev->data->nb_queues;
                break;
        case RTE_EVENT_DEV_ATTR_STARTED:
                *attr_value = dev->data->dev_started;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
                        uint32_t *attr_value)
{
        struct rte_eventdev *dev;

        if (!attr_value)
                return -EINVAL;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];
        if (!is_valid_port(dev, port_id)) {
                RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
                return -EINVAL;
        }

        switch (attr_id) {
        case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
                *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
                break;
        case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
                *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
                break;
        case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
                *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
                        uint32_t *attr_value)
{
        struct rte_event_queue_conf *conf;
        struct rte_eventdev *dev;

        if (!attr_value)
                return -EINVAL;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];
        if (!is_valid_queue(dev, queue_id)) {
                RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
                return -EINVAL;
        }

        conf = &dev->data->queues_cfg[queue_id];

        switch (attr_id) {
        case RTE_EVENT_QUEUE_ATTR_PRIORITY:
                *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
                if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
                        *attr_value = conf->priority;
                break;
        case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
                *attr_value = conf->nb_atomic_flows;
                break;
        case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
                *attr_value = conf->nb_atomic_order_sequences;
                break;
        case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
                *attr_value = conf->event_queue_cfg;
                break;
        case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
                if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
                        return -EOVERFLOW;
                *attr_value = conf->schedule_type;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

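/*
 * Usage sketch (illustrative): attributes are read back as plain uint32_t
 * values, e.g. the scheduling type chosen for queue 0.
 *
 *      uint32_t sched_type;
 *
 *      if (rte_event_queue_attr_get(dev_id, 0,
 *                      RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE, &sched_type) == 0)
 *              printf("queue 0 schedule type %u\n", sched_type);
 */
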
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
                    const uint8_t queues[], const uint8_t priorities[],
                    uint16_t nb_links)
{
        struct rte_eventdev *dev;
        uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
        uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
        uint16_t *links_map;
        int i, diag;

        RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
        dev = &rte_eventdevs[dev_id];

        if (*dev->dev_ops->port_link == NULL) {
                RTE_PMD_DEBUG_TRACE("Function not supported\n");
                rte_errno = ENOTSUP;
                return 0;
        }

        if (!is_valid_port(dev, port_id)) {
                RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
                rte_errno = EINVAL;
                return 0;
        }

        if (queues == NULL) {
                for (i = 0; i < dev->data->nb_queues; i++)
                        queues_list[i] = i;

                queues = queues_list;
                nb_links = dev->data->nb_queues;
        }

        if (priorities == NULL) {
                for (i = 0; i < nb_links; i++)
                        priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

                priorities = priorities_list;
        }

        for (i = 0; i < nb_links; i++)
                if (queues[i] >= dev->data->nb_queues) {
                        rte_errno = EINVAL;
                        return 0;
                }

        diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
                                                queues, priorities, nb_links);
        if (diag < 0)
                return diag;

        links_map = dev->data->links_map;
        /* Point links_map to this port specific area */
        links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
        for (i = 0; i < diag; i++)
                links_map[queues[i]] = (uint8_t)priorities[i];

        return diag;
}

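/*
 * Usage sketch (illustrative): NULL queue and priority arrays link a port
 * to every configured queue at normal priority, via the fallbacks above.
 * On failure the return count is short and rte_errno is set.
 *
 *      int nb = rte_event_port_link(dev_id, 0, NULL, NULL, 0);
 *
 *      if (nb != dev_conf.nb_event_queues)
 *              rte_exit(EXIT_FAILURE, "linked %d queues, errno %d\n",
 *                              nb, rte_errno);
 */
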
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
                      uint8_t queues[], uint16_t nb_unlinks)
{
        struct rte_eventdev *dev;
        uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
        uint16_t *links_map;
        int i, diag, j;

        RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
        dev = &rte_eventdevs[dev_id];

        if (*dev->dev_ops->port_unlink == NULL) {
                RTE_PMD_DEBUG_TRACE("Function not supported\n");
                rte_errno = ENOTSUP;
                return 0;
        }

        if (!is_valid_port(dev, port_id)) {
                RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
                rte_errno = EINVAL;
                return 0;
        }

        links_map = dev->data->links_map;
        /* Point links_map to this port specific area */
        links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);

        if (queues == NULL) {
                j = 0;
                for (i = 0; i < dev->data->nb_queues; i++) {
                        if (links_map[i] !=
                                        EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
                                all_queues[j] = i;
                                j++;
                        }
                }
                queues = all_queues;
        } else {
                for (j = 0; j < nb_unlinks; j++) {
                        if (links_map[queues[j]] ==
                                        EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
                                break;
                }
        }

        nb_unlinks = j;
        for (i = 0; i < nb_unlinks; i++)
                if (queues[i] >= dev->data->nb_queues) {
                        rte_errno = EINVAL;
                        return 0;
                }

        diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
                                        queues, nb_unlinks);

        if (diag < 0)
                return diag;

        for (i = 0; i < diag; i++)
                links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

        return diag;
}

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
                         uint8_t queues[], uint8_t priorities[])
{
        struct rte_eventdev *dev;
        uint16_t *links_map;
        int i, count = 0;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];
        if (!is_valid_port(dev, port_id)) {
                RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
                return -EINVAL;
        }

        links_map = dev->data->links_map;
        /* Point links_map to this port specific area */
        links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
        for (i = 0; i < dev->data->nb_queues; i++) {
                if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
                        queues[count] = i;
                        priorities[count] = (uint8_t)links_map[i];
                        ++count;
                }
        }
        return count;
}

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
                                 uint64_t *timeout_ticks)
{
        struct rte_eventdev *dev;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

        if (timeout_ticks == NULL)
                return -EINVAL;

        return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}

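/*
 * Usage sketch (illustrative): convert a wall-clock timeout (100 us here,
 * as an example) into device ticks for rte_event_dequeue_burst().
 *
 *      struct rte_event ev[32];
 *      uint64_t ticks = 0;
 *      uint16_t n;
 *
 *      if (rte_event_dequeue_timeout_ticks(dev_id, 100000, &ticks) == 0)
 *              n = rte_event_dequeue_burst(dev_id, port_id, ev,
 *                              RTE_DIM(ev), ticks);
 */
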
int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
{
        struct rte_eventdev *dev;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];

        if (service_id == NULL)
                return -EINVAL;

        if (dev->data->service_inited)
                *service_id = dev->data->service_id;

        return dev->data->service_inited ? 0 : -ESRCH;
}

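/*
 * Usage sketch (illustrative): software eventdevs expose their scheduler as
 * a service; map it to a service lcore so it actually runs. The lcore id
 * "slcore_id" is an assumed, application-chosen value.
 *
 *      uint32_t service_id;
 *
 *      if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *              rte_service_map_lcore_set(service_id, slcore_id, 1);
 *              rte_service_runstate_set(service_id, 1);
 *      }
 */
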
int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
        struct rte_eventdev *dev;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);

        (*dev->dev_ops->dump)(dev, f);
        return 0;
}

static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
                uint8_t queue_port_id)
{
        struct rte_eventdev *dev = &rte_eventdevs[dev_id];
        if (dev->dev_ops->xstats_get_names != NULL)
                return (*dev->dev_ops->xstats_get_names)(dev, mode,
                                                        queue_port_id,
                                                        NULL, NULL, 0);
        return 0;
}

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
                enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
                struct rte_event_dev_xstats_name *xstats_names,
                unsigned int *ids, unsigned int size)
{
        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
        const int cnt_expected_entries = xstats_get_count(dev_id, mode,
                                                          queue_port_id);
        if (xstats_names == NULL || cnt_expected_entries < 0 ||
                        (int)size < cnt_expected_entries)
                return cnt_expected_entries;

        /* dev_id checked above */
        const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

        if (dev->dev_ops->xstats_get_names != NULL)
                return (*dev->dev_ops->xstats_get_names)(dev, mode,
                                queue_port_id, xstats_names, ids, size);

        return -ENOTSUP;
}

/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
                uint8_t queue_port_id, const unsigned int ids[],
                uint64_t values[], unsigned int n)
{
        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
        const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

        /* implemented by the driver */
        if (dev->dev_ops->xstats_get != NULL)
                return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
                                ids, values, n);
        return -ENOTSUP;
}

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
                unsigned int *id)
{
        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
        const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
        unsigned int temp = -1;

        if (id != NULL)
                *id = (unsigned int)-1;
        else
                id = &temp; /* ensure driver never gets a NULL value */

        /* implemented by driver */
        if (dev->dev_ops->xstats_get_by_name != NULL)
                return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
        return -ENOTSUP;
}

int rte_event_dev_xstats_reset(uint8_t dev_id,
                enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
                const uint32_t ids[], uint32_t nb_ids)
{
        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        struct rte_eventdev *dev = &rte_eventdevs[dev_id];

        if (dev->dev_ops->xstats_reset != NULL)
                return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
                                                        ids, nb_ids);
        return -ENOTSUP;
}

int rte_event_dev_selftest(uint8_t dev_id)
{
        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        struct rte_eventdev *dev = &rte_eventdevs[dev_id];

        if (dev->dev_ops->dev_selftest != NULL)
                return (*dev->dev_ops->dev_selftest)();
        return -ENOTSUP;
}

int
rte_event_dev_start(uint8_t dev_id)
{
        struct rte_eventdev *dev;
        int diag;

        RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
                                dev_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        return 0;
}

int __rte_experimental
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
                eventdev_stop_flush_t callback, void *userdata)
{
        struct rte_eventdev *dev;

        RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];

        dev->dev_ops->dev_stop_flush = callback;
        dev->data->dev_stop_flush_arg = userdata;

        return 0;
}

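/*
 * Usage sketch (illustrative): a flush callback matching the
 * eventdev_stop_flush_t signature; it receives each event still inside the
 * device when rte_event_dev_stop() is called. Freeing the mbuf assumes
 * the application's events carry packet buffers.
 *
 *      static void
 *      flush_cb(uint8_t dev_id, struct rte_event event, void *arg)
 *      {
 *              rte_pktmbuf_free(event.mbuf);
 *      }
 *
 *      rte_event_dev_stop_flush_callback_register(dev_id, flush_cb, NULL);
 */
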
void
rte_event_dev_stop(uint8_t dev_id)
{
        struct rte_eventdev *dev;

        RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

        RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
        dev = &rte_eventdevs[dev_id];
        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
                                dev_id);
                return;
        }

        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

int
rte_event_dev_close(uint8_t dev_id)
{
        struct rte_eventdev *dev;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

        /* Device must be stopped before it can be closed */
        if (dev->data->dev_started == 1) {
                RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
                                dev_id);
                return -EBUSY;
        }

        return (*dev->dev_ops->dev_close)(dev);
}

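/*
 * Usage sketch (illustrative): the teardown order enforced above is stop,
 * then close.
 *
 *      rte_event_dev_stop(dev_id);
 *      if (rte_event_dev_close(dev_id) < 0)
 *              printf("close failed for dev %u\n", dev_id);
 */
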
static inline int
rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
                int socket_id)
{
        char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
        const struct rte_memzone *mz;
        int n;

        /* Generate memzone name */
        n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
        if (n >= (int)sizeof(mz_name))
                return -EINVAL;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                mz = rte_memzone_reserve(mz_name,
                                sizeof(struct rte_eventdev_data),
                                socket_id, 0);
        else
                mz = rte_memzone_lookup(mz_name);

        if (mz == NULL)
                return -ENOMEM;

        *data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(*data, 0, sizeof(struct rte_eventdev_data));

        return 0;
}

static inline uint8_t
rte_eventdev_find_free_device_index(void)
{
        uint8_t dev_id;

        for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
                if (rte_eventdevs[dev_id].attached ==
                                RTE_EVENTDEV_DETACHED)
                        return dev_id;
        }
        return RTE_EVENT_MAX_DEVS;
}

struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
        struct rte_eventdev *eventdev;
        uint8_t dev_id;

        if (rte_event_pmd_get_named_dev(name) != NULL) {
                RTE_EDEV_LOG_ERR("Event device with name %s already allocated!",
                                name);
                return NULL;
        }

        dev_id = rte_eventdev_find_free_device_index();
        if (dev_id == RTE_EVENT_MAX_DEVS) {
                RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
                return NULL;
        }

        eventdev = &rte_eventdevs[dev_id];

        if (eventdev->data == NULL) {
                struct rte_eventdev_data *eventdev_data = NULL;

                int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
                                socket_id);

                if (retval < 0 || eventdev_data == NULL)
                        return NULL;

                eventdev->data = eventdev_data;

                snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
                                "%s", name);

                eventdev->data->dev_id = dev_id;
                eventdev->data->socket_id = socket_id;
                eventdev->data->dev_started = 0;

                eventdev->attached = RTE_EVENTDEV_ATTACHED;

                eventdev_globals.nb_devs++;
        }

        return eventdev;
}

int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
        int ret;
        char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
        const struct rte_memzone *mz;

        if (eventdev == NULL)
                return -EINVAL;

        eventdev->attached = RTE_EVENTDEV_DETACHED;
        eventdev_globals.nb_devs--;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eventdev->data->dev_private);

                /* Generate memzone name */
                ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
                                eventdev->data->dev_id);
                if (ret >= (int)sizeof(mz_name))
                        return -EINVAL;

                mz = rte_memzone_lookup(mz_name);
                if (mz == NULL)
                        return -ENOMEM;

                ret = rte_memzone_free(mz);
                if (ret)
                        return ret;
        }

        eventdev->data = NULL;
        return 0;
}