/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_ethdev.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"

struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];

static struct rte_eventdev_global eventdev_globals = {
	.nb_devs		= 0
};

struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;
/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
	return rte_eventdev_globals->nb_devs;
}

int
rte_event_dev_get_dev_id(const char *name)
{
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
		if ((strcmp(rte_event_devices[i].data->name, name)
				== 0) &&
				(rte_event_devices[i].attached ==
				RTE_EVENTDEV_ATTACHED))
			return i;
	return -ENODEV;
}

int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	return dev->data->socket_id;
}

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
	dev_info->dev = dev->dev;

	return 0;
}
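/*
 * Usage sketch (illustrative only, not part of the library): an application
 * would normally query the limits reported here before configuring the
 * device. The device id 0 and the use of printf are assumptions made just
 * for this example.
 *
 *	struct rte_event_dev_info info;
 *
 *	if (rte_event_dev_info_get(0, &info) == 0)
 *		printf("%s: max queues %u, max ports %u\n",
 *			info.driver_name, info.max_event_queues,
 *			info.max_event_ports);
 */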
int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;
	*caps = 0;

	return dev->dev_ops->eth_rx_adapter_caps_get ?
			(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
					&rte_eth_devices[eth_port_id],
					caps)
			: 0;
}
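/*
 * Usage sketch (illustrative): the capability flags tell the application
 * whether the ethdev can enqueue events directly or needs a service core.
 * The eventdev and ethdev ids used below are example assumptions.
 *
 *	uint32_t caps = 0;
 *
 *	if (rte_event_eth_rx_adapter_caps_get(0, 0, &caps) == 0 &&
 *	    (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
 *		printf("ethdev can enqueue events without a service core\n");
 */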
static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	struct rte_event_queue_conf *queues_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->queues_cfg == NULL && nb_queues != 0) {
		/* Allocate memory to store queue configuration */
		dev->data->queues_cfg = rte_zmalloc_socket(
				"eventdev->data->queues_cfg",
				sizeof(dev->data->queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->queues_cfg == NULL) {
			dev->data->nb_queues = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
					" nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
	/* Re-configure */
	} else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		/* Reallocate memory to store queue configuration */
		queues_cfg = dev->data->queues_cfg;
		queues_cfg = rte_realloc(queues_cfg,
				sizeof(queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (queues_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
					" nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
		dev->data->queues_cfg = queues_cfg;

		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_cfg + old_nb_queues, 0,
				sizeof(queues_cfg[0]) * new_qs);
		}
	} else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}

#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
static inline int
rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
	uint8_t old_nb_ports = dev->data->nb_ports;
	void **ports;
	uint16_t *links_map;
	struct rte_event_port_conf *ports_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->ports == NULL && nb_ports != 0) {
		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
				sizeof(dev->data->ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
					" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store port configurations */
		dev->data->ports_cfg =
			rte_zmalloc_socket("eventdev->ports_cfg",
			sizeof(dev->data->ports_cfg[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports_cfg == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
					" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store queue to port link connection */
		dev->data->links_map =
			rte_zmalloc_socket("eventdev->links_map",
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
					" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}
		for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
			dev->data->links_map[i] =
				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
	} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		ports_cfg = dev->data->ports_cfg;
		links_map = dev->data->links_map;

		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);

		/* Realloc memory for ports */
		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE);
		if (ports == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
					" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory for ports_cfg */
		ports_cfg = rte_realloc(ports_cfg,
			sizeof(ports_cfg[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE);
		if (ports_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
					" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory to store queue to port link connection */
		links_map = rte_realloc(links_map,
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE);
		if (links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
					" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		if (nb_ports > old_nb_ports) {
			uint8_t new_ps = nb_ports - old_nb_ports;
			unsigned int old_links_map_end =
				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
			unsigned int links_map_end =
				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

			memset(ports + old_nb_ports, 0,
				sizeof(ports[0]) * new_ps);
			memset(ports_cfg + old_nb_ports, 0,
				sizeof(ports_cfg[0]) * new_ps);
			for (i = old_links_map_end; i < links_map_end; i++)
				links_map[i] =
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
		}

		dev->data->ports = ports;
		dev->data->ports_cfg = ports_cfg;
		dev->data->links_map = links_map;
	} else if (dev->data->ports != NULL && nb_ports == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);
	}

	dev->data->nb_ports = nb_ports;
	return 0;
}
int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_dev_info info;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check dequeue_timeout_ns value is in limit */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns &&
		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				info.max_dequeue_timeout_ns)) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check nb_events_limit is in limit */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
			dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check nb_event_queues is in limit */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d",
			dev_id, dev_conf->nb_event_queues, info.max_event_queues);
		return -EINVAL;
	}

	/* Check nb_event_ports is in limit */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d",
			dev_id, dev_conf->nb_event_ports, info.max_event_ports);
		return -EINVAL;
	}

	/* Check nb_event_queue_flows is in limit */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
			dev_id, dev_conf->nb_event_queue_flows,
			info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check nb_event_port_dequeue_depth is in limit */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_dequeue_depth >
			info.max_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
			dev_id, dev_conf->nb_event_port_dequeue_depth,
			info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check nb_event_port_enqueue_depth is in limit */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_enqueue_depth >
			info.max_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
			dev_id, dev_conf->nb_event_port_enqueue_depth,
			info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		rte_event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		rte_event_dev_queue_config(dev, 0);
		rte_event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	return diag;
}
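/*
 * Usage sketch (illustrative, not part of the library): a minimal
 * single-queue, single-port configuration derived from the limits returned
 * by rte_event_dev_info_get(). All numeric choices below are example
 * assumptions, not requirements.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queues = 1;
 *	cfg.nb_event_ports = 1;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	if (rte_event_dev_configure(dev_id, &cfg) < 0)
 *		rte_panic("eventdev configure failed\n");
 */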
static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
	return 0;
}
static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ATOMIC)
		))
		return 1;
	else
		return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ORDERED)
		))
		return 1;
	else
		return 0;
}
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

	if (queue_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	dev->data->queues_cfg[queue_id] = *queue_conf;
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
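/*
 * Usage sketch (illustrative): queues are normally set up from the driver
 * default configuration, overriding only the fields the application cares
 * about. Queue id 0 and the ATOMIC schedule type are example choices.
 *
 *	struct rte_event_queue_conf qconf;
 *
 *	rte_event_queue_default_conf_get(dev_id, 0, &qconf);
 *	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	qconf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *	if (rte_event_queue_setup(dev_id, 0, &qconf) < 0)
 *		rte_panic("queue setup failed\n");
 */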
static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
	return 0;
}
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
			dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
			dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (port_conf && port_conf->disable_implicit_release &&
	    !(dev->data->event_dev_cap &
	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Implicit release disable not supported",
			dev_id, port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

	if (port_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_cfg[port_id] = *port_conf;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port(default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	if (diag < 0)
		return diag;

	return 0;
}
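/*
 * Usage sketch (illustrative): passing a NULL configuration makes the code
 * above fall back to the driver default; alternatively the application can
 * shrink the depths below the configured maxima, as sketched here with
 * example values.
 *
 *	struct rte_event_port_conf pconf;
 *
 *	rte_event_port_default_conf_get(dev_id, 0, &pconf);
 *	pconf.dequeue_depth = 4;	(example value within the maximum)
 *	if (rte_event_port_setup(dev_id, 0, &pconf) < 0)
 *		rte_panic("port setup failed\n");
 */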
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	switch (attr_id) {
	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
		*attr_value = dev->data->nb_ports;
		break;
	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
		*attr_value = dev->data->nb_queues;
		break;
	case RTE_EVENT_DEV_ATTR_STARTED:
		*attr_value = dev->data->dev_started;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	switch (attr_id) {
	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_event_queue_conf *conf;
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	conf = &dev->data->queues_cfg[queue_id];

	switch (attr_id) {
	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->priority;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
		*attr_value = conf->nb_atomic_flows;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
		*attr_value = conf->nb_atomic_order_sequences;
		break;
	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
		*attr_value = conf->event_queue_cfg;
		break;
	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
			return -EOVERFLOW;

		*attr_value = conf->schedule_type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
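/*
 * Usage sketch (illustrative): attribute queries let monitoring code read
 * back runtime values without keeping its own copy of the configuration.
 * Port id 0 is an example assumption.
 *
 *	uint32_t nb_ports = 0, depth = 0;
 *
 *	rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_PORT_COUNT,
 *			       &nb_ports);
 *	rte_event_port_attr_get(dev_id, 0, RTE_EVENT_PORT_ATTR_DEQ_DEPTH,
 *				&depth);
 */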
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct rte_eventdev *dev;
	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_link == NULL) {
		RTE_PMD_DEBUG_TRACE("Function not supported\n");
		rte_errno = -ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = -EINVAL;
		return 0;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			queues_list[i] = i;

		queues = queues_list;
		nb_links = dev->data->nb_queues;
	}

	if (priorities == NULL) {
		for (i = 0; i < nb_links; i++)
			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

		priorities = priorities_list;
	}

	for (i = 0; i < nb_links; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = -EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
						queues, priorities, nb_links);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = (uint8_t)priorities[i];

	return diag;
}
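/*
 * Usage sketch (illustrative): linking port 0 to queue 0 at normal priority;
 * passing NULL for both arrays instead would link every configured queue.
 *
 *	uint8_t q = 0;
 *	uint8_t prio = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *
 *	if (rte_event_port_link(dev_id, 0, &q, &prio, 1) != 1)
 *		rte_panic("link failed: %d\n", rte_errno);
 */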
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct rte_eventdev *dev;
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	int i, diag, j;
	uint16_t *links_map;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_unlink == NULL) {
		RTE_PMD_DEBUG_TRACE("Function not supported\n");
		rte_errno = -ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = -EINVAL;
		return 0;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);

	if (queues == NULL) {
		j = 0;
		for (i = 0; i < dev->data->nb_queues; i++) {
			if (links_map[i] !=
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
				all_queues[j] = i;
				j++;
			}
		}
		queues = all_queues;
	} else {
		for (j = 0; j < nb_unlinks; j++) {
			if (links_map[queues[j]] ==
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
				break;
		}
	}

	nb_unlinks = j;
	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = -EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
					queues, nb_unlinks);

	if (diag < 0)
		return diag;

	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	return diag;
}
int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}

	return count;
}
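/*
 * Usage sketch (illustrative): reading back the links established on port 0.
 *
 *	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
 *	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
 *	int n = rte_event_port_links_get(dev_id, 0, queues, priorities);
 *
 *	while (n-- > 0)
 *		printf("queue %u priority %u\n", queues[n], priorities[n]);
 */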
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

	if (timeout_ticks == NULL)
		return -EINVAL;

	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}
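/*
 * Usage sketch (illustrative): converting a 100 microsecond timeout to the
 * device specific tick value expected by rte_event_dequeue_burst(). The
 * fallback to 0 (non-blocking dequeue) is an example policy.
 *
 *	uint64_t ticks = 0;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) < 0)
 *		ticks = 0;
 */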
int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (service_id == NULL)
		return -EINVAL;

	if (dev->data->service_inited)
		*service_id = dev->data->service_id;

	return dev->data->service_inited ? 0 : -ESRCH;
}
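/*
 * Usage sketch (illustrative): software schedulers expose a service that a
 * service lcore must run; hardware schedulers return -ESRCH here and need no
 * such mapping. The service_lcore variable is an example assumption.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		rte_service_map_lcore_set(service_id, service_lcore, 1);
 *		rte_service_runstate_set(service_id, 1);
 *	}
 */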
int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);

	(*dev->dev_ops->dump)(dev, f);
	return 0;
}
static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
							queue_port_id,
							NULL, NULL, 0);
	return 0;
}

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
							queue_port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* dev_id checked above */
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
				queue_port_id, xstats_names, ids, size);

	return -ENOTSUP;
}

/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
				ids, values, n);
	return -ENOTSUP;
}
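/*
 * Usage sketch (illustrative): xstats are read in two steps, first sizing
 * and naming, then fetching values by id. Error handling and freeing of the
 * buffers are omitted for brevity.
 *
 *	int n = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *	struct rte_event_dev_xstats_name *names = calloc(n, sizeof(*names));
 *	unsigned int *ids = calloc(n, sizeof(*ids));
 *	uint64_t *values = calloc(n, sizeof(*values));
 *
 *	rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
 *			0, names, ids, n);
 *	rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *			ids, values, n);
 */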
uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		unsigned int *id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	unsigned int temp = -1;

	if (id != NULL)
		*id = (unsigned int)-1;
	else
		id = &temp; /* ensure driver never gets a NULL value */

	/* implemented by driver */
	if (dev->dev_ops->xstats_get_by_name != NULL)
		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
	return -ENOTSUP;
}

int rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_reset != NULL)
		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
							ids, nb_ids);
	return -ENOTSUP;
}

int rte_event_dev_selftest(uint8_t dev_id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->dev_selftest != NULL)
		return (*dev->dev_ops->dev_selftest)();
	return -ENOTSUP;
}
int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_event_dev_stop(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}

int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	return (*dev->dev_ops->dev_close)(dev);
}
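/*
 * Usage sketch (illustrative): the expected device lifecycle once queues and
 * ports have been set up and linked.
 *
 *	if (rte_event_dev_start(dev_id) < 0)
 *		rte_panic("start failed\n");
 *	... enqueue/dequeue on the configured ports ...
 *	rte_event_dev_stop(dev_id);
 *	rte_event_dev_close(dev_id);
 */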
static inline int
rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
	} else {
		mz = rte_memzone_lookup(mz_name);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_eventdev_data));

	return 0;
}

static inline uint8_t
rte_eventdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_DETACHED)
			return dev_id;
	}
	return RTE_EVENT_MAX_DEVS;
}

struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;
	uint8_t dev_id;

	if (rte_event_pmd_get_named_dev(name) != NULL) {
		RTE_EDEV_LOG_ERR("Event device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_eventdev_find_free_device_index();
	if (dev_id == RTE_EVENT_MAX_DEVS) {
		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
		return NULL;
	}

	eventdev = &rte_eventdevs[dev_id];

	if (eventdev->data == NULL) {
		struct rte_eventdev_data *eventdev_data = NULL;

		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
				socket_id);

		if (retval < 0 || eventdev_data == NULL)
			return NULL;

		eventdev->data = eventdev_data;

		snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
				"%s", name);

		eventdev->data->dev_id = dev_id;
		eventdev->data->socket_id = socket_id;
		eventdev->data->dev_started = 0;

		eventdev->attached = RTE_EVENTDEV_ATTACHED;

		eventdev_globals.nb_devs++;
	}

	return eventdev;
}
int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
				eventdev->data->dev_id);
		if (ret >= (int)sizeof(mz_name))
			return -EINVAL;

		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL)
			return -ENOMEM;

		ret = rte_memzone_free(mz);
		if (ret)
			return ret;
	}

	eventdev->data = NULL;