/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"

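/*
 * Each event device occupies one slot in the fixed-size table below; a slot
 * is marked RTE_EVENTDEV_ATTACHED while a PMD owns it and
 * RTE_EVENTDEV_DETACHED otherwise.
 */
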
struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];

static struct rte_eventdev_global eventdev_globals = {
	.nb_devs		= 0
};

struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;

/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
	return rte_eventdev_globals->nb_devs;
}

int
rte_event_dev_get_dev_id(const char *name)
{
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
		if ((strcmp(rte_event_devices[i].data->name, name)
				== 0) &&
				(rte_event_devices[i].attached ==
						RTE_EVENTDEV_ATTACHED))
			return i;
	return -ENODEV;
}

int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	return dev->data->socket_id;
}

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

	dev_info->dev = dev->dev;

	return 0;
}

static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	struct rte_event_queue_conf *queues_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->queues_cfg == NULL && nb_queues != 0) {
		/* Allocate memory to store queue configuration */
		dev->data->queues_cfg = rte_zmalloc_socket(
				"eventdev->data->queues_cfg",
				sizeof(dev->data->queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->queues_cfg == NULL) {
			dev->data->nb_queues = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
					"nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
	/* Re-configure */
	} else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		/* Re-allocate memory to store queue configuration */
		queues_cfg = dev->data->queues_cfg;
		queues_cfg = rte_realloc(queues_cfg,
				sizeof(queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (queues_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
						" nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
		dev->data->queues_cfg = queues_cfg;

		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_cfg + old_nb_queues, 0,
				sizeof(queues_cfg[0]) * new_qs);
		}
	} else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}

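/*
 * links_map[] holds one uint16_t per port/queue pair: either the 8-bit
 * service priority of an established link or this sentinel, which marks the
 * queue as unlinked and cannot collide with a priority cast down from eight
 * bits.
 */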
#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)

static inline int
rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
	uint8_t old_nb_ports = dev->data->nb_ports;
	void **ports;
	uint16_t *links_map;
	struct rte_event_port_conf *ports_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->ports == NULL && nb_ports != 0) {
		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
				sizeof(dev->data->ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store port configurations */
		dev->data->ports_cfg =
			rte_zmalloc_socket("eventdev->ports_cfg",
			sizeof(dev->data->ports_cfg[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports_cfg == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store queue to port link connection */
		dev->data->links_map =
			rte_zmalloc_socket("eventdev->links_map",
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}
		for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
			dev->data->links_map[i] =
				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
	} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		ports_cfg = dev->data->ports_cfg;
		links_map = dev->data->links_map;

		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);

		/* Realloc memory for ports */
		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE);
		if (ports == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
						" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory for ports_cfg */
		ports_cfg = rte_realloc(ports_cfg,
			sizeof(ports_cfg[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE);
		if (ports_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
						" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory to store queue to port link connection */
		links_map = rte_realloc(links_map,
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE);
		if (links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		if (nb_ports > old_nb_ports) {
			uint8_t new_ps = nb_ports - old_nb_ports;
			unsigned int old_links_map_end =
				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
			unsigned int links_map_end =
				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

			memset(ports + old_nb_ports, 0,
				sizeof(ports[0]) * new_ps);
			memset(ports_cfg + old_nb_ports, 0,
				sizeof(ports_cfg[0]) * new_ps);
			for (i = old_links_map_end; i < links_map_end; i++)
				links_map[i] =
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
		}

		dev->data->ports = ports;
		dev->data->ports_cfg = ports_cfg;
		dev->data->links_map = links_map;
	} else if (dev->data->ports != NULL && nb_ports == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);
	}

	dev->data->nb_ports = nb_ports;
	return 0;
}

int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_dev_info info;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check dequeue_timeout_ns value is in limit */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns &&
		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				info.max_dequeue_timeout_ns)) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check nb_events_limit is in limit */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
		dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check nb_event_queues is in limit */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues) {
		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
		dev_id, dev_conf->nb_event_queues, info.max_event_queues);
		return -EINVAL;
	}

	/* Check nb_event_ports is in limit */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d",
		dev_id, dev_conf->nb_event_ports, info.max_event_ports);
		return -EINVAL;
	}

	/* Check nb_event_queue_flows is in limit */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
		dev_id, dev_conf->nb_event_queue_flows,
		info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check nb_event_port_dequeue_depth is in limit */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		 (dev_conf->nb_event_port_dequeue_depth >
			 info.max_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
		dev_id, dev_conf->nb_event_port_dequeue_depth,
		info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check nb_event_port_enqueue_depth is in limit */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_enqueue_depth >
			 info.max_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
		dev_id, dev_conf->nb_event_port_enqueue_depth,
		info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		rte_event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		rte_event_dev_queue_config(dev, 0);
		rte_event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	return diag;
}

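/*
 * Illustrative application-side configuration sequence (not part of this
 * file); device id and sizing values are arbitrary and error handling is
 * omitted:
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config conf = {0};
 *
 *	rte_event_dev_info_get(0, &info);
 *	conf.nb_event_queues = 2;
 *	conf.nb_event_ports = 4;
 *	conf.nb_events_limit = info.max_num_events;
 *	conf.nb_event_queue_flows = info.max_event_queue_flows;
 *	conf.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	conf.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	conf.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	rte_event_dev_configure(0, &conf);
 */
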
static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
	return 0;
}

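/*
 * A queue holds atomic (or ordered) flow state only when its cfg type is
 * ATOMIC_ONLY (or ORDERED_ONLY) or ALL_TYPES, and it is not a single-link
 * queue; the two helpers below encode that test so that
 * rte_event_queue_setup() only range-checks the limits that apply.
 */
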
static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) && (
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY)
		))
		return 1;
	else
		return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) && (
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ORDERED_ONLY)
		))
		return 1;
	else
		return 0;
}

int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

	if (queue_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	dev->data->queues_cfg[queue_id] = *queue_conf;
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}

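/*
 * Illustrative usage (not part of this file): configure queue 0 on device 0
 * as atomic-only, starting from the driver's default configuration; passing
 * queue_conf == NULL instead selects that default unchanged:
 *
 *	struct rte_event_queue_conf qconf;
 *
 *	rte_event_queue_default_conf_get(0, 0, &qconf);
 *	qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY;
 *	rte_event_queue_setup(0, 0, &qconf);
 */
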
static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
	return 0;
}

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
			dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
			dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

	if (port_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_cfg[port_id] = *port_conf;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port (default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	if (diag < 0)
		return diag;

	return 0;
}

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	switch (attr_id) {
	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
		*attr_value = dev->data->nb_ports;
		break;
	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
		*attr_value = dev->data->nb_queues;
		break;
	case RTE_EVENT_DEV_ATTR_STARTED:
		*attr_value = dev->data->dev_started;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	switch (attr_id) {
	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_event_queue_conf *conf;
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	conf = &dev->data->queues_cfg[queue_id];

	switch (attr_id) {
	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->priority;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
		*attr_value = conf->nb_atomic_flows;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
		*attr_value = conf->nb_atomic_order_sequences;
		break;
	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
		*attr_value = conf->event_queue_cfg;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct rte_eventdev *dev;
	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_link, -ENOTSUP);

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			queues_list[i] = i;

		queues = queues_list;
		nb_links = dev->data->nb_queues;
	}

	if (priorities == NULL) {
		for (i = 0; i < nb_links; i++)
			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

		priorities = priorities_list;
	}

	for (i = 0; i < nb_links; i++)
		if (queues[i] >= dev->data->nb_queues)
			return -EINVAL;

	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
						queues, priorities, nb_links);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = (uint8_t)priorities[i];

	return diag;
}

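/*
 * Illustrative usage (not part of this file): passing NULL for both the
 * queues and priorities arrays links every configured queue to port 0 at
 * RTE_EVENT_DEV_PRIORITY_NORMAL, as the fallback paths above implement:
 *
 *	rte_event_port_link(0, 0, NULL, NULL, 0);
 */
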
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct rte_eventdev *dev;
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlink, -ENOTSUP);

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			all_queues[i] = i;
		queues = all_queues;
		nb_unlinks = dev->data->nb_queues;
	}

	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= dev->data->nb_queues)
			return -EINVAL;

	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
					queues, nb_unlinks);

	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	return diag;
}

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}
	return count;
}

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

	if (timeout_ticks == NULL)
		return -EINVAL;

	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}

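/*
 * Illustrative usage (not part of this file): convert a 10 us timeout to
 * device-specific ticks before passing it to rte_event_dequeue_burst():
 *
 *	uint64_t ticks;
 *
 *	if (rte_event_dequeue_timeout_ticks(0, 10 * 1000, &ticks) < 0)
 *		ticks = 0;
 */
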
int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);

	(*dev->dev_ops->dump)(dev, f);
	return 0;
}

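/*
 * Probe the driver for the number of xstats available in the given mode by
 * requesting the names with a NULL output array.
 */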
static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
							queue_port_id,
							NULL, NULL, 0);
	return 0;
}

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
							  queue_port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* dev_id checked above */
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
				queue_port_id, xstats_names, ids, size);

	return -ENOTSUP;
}

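/*
 * Callers typically size the xstats_names table with a probing call first:
 * passing a NULL table (or a too-small size) returns the expected entry
 * count without filling anything in.
 */
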
/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
							ids, values, n);
	return -ENOTSUP;
}

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		unsigned int *id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	unsigned int temp = -1;

	if (id != NULL)
		*id = (unsigned int)-1;
	else
		id = &temp; /* ensure driver never gets a NULL value */

	/* implemented by driver */
	if (dev->dev_ops->xstats_get_by_name != NULL)
		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
	return -ENOTSUP;
}

int rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_reset != NULL)
		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
							ids, nb_ids);
	return -ENOTSUP;
}

int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_event_dev_stop(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}

int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	return (*dev->dev_ops->dev_close)(dev);
}

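/*
 * Device data lives in a named memzone so secondary processes can attach to
 * an instance created by the primary: the primary reserves and zeroes the
 * zone, secondaries only look it up.
 */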
static inline int
rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
	} else {
		mz = rte_memzone_lookup(mz_name);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_eventdev_data));

	return 0;
}

static inline uint8_t
rte_eventdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_DETACHED)
			return dev_id;
	}
	return RTE_EVENT_MAX_DEVS;
}

struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;
	uint8_t dev_id;

	if (rte_event_pmd_get_named_dev(name) != NULL) {
		RTE_EDEV_LOG_ERR("Event device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_eventdev_find_free_device_index();
	if (dev_id == RTE_EVENT_MAX_DEVS) {
		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
		return NULL;
	}

	eventdev = &rte_eventdevs[dev_id];

	if (eventdev->data == NULL) {
		struct rte_eventdev_data *eventdev_data = NULL;

		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
				socket_id);

		if (retval < 0 || eventdev_data == NULL)
			return NULL;

		eventdev->data = eventdev_data;

		snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
				"%s", name);

		eventdev->data->dev_id = dev_id;
		eventdev->data->socket_id = socket_id;
		eventdev->data->dev_started = 0;

		eventdev->attached = RTE_EVENTDEV_ATTACHED;

		eventdev_globals.nb_devs++;
	}

	return eventdev;
}

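/*
 * Releasing a device detaches its slot, and in the primary process frees
 * the PMD private data and returns the backing memzone to the system.
 */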
int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
				eventdev->data->dev_id);
		if (ret >= (int)sizeof(mz_name))
			return -EINVAL;

		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL)
			return -ENOMEM;

		ret = rte_memzone_free(mz);
		if (ret)
			return ret;
	}

	eventdev->data = NULL;
	return 0;
}