/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Cavium networks. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];

static struct rte_eventdev_global eventdev_globals = {
	.nb_devs		= 0
};

struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;
/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
	return rte_eventdev_globals->nb_devs;
}

int
rte_event_dev_get_dev_id(const char *name)
{
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
		if ((strcmp(rte_event_devices[i].data->name, name)
				== 0) &&
				(rte_event_devices[i].attached ==
						RTE_EVENTDEV_ATTACHED))
			return i;
	return -ENODEV;
}
int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	return dev->data->socket_id;
}
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

	dev_info->pci_dev = dev->pci_dev;
	return 0;
}
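/*
 * Internal helper used by rte_event_dev_configure(): (re)allocates the
 * per-queue priority array and releases driver queues that fall outside
 * the new queue count. Not part of the public API.
 */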
static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	uint8_t *queues_prio;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->queues_prio == NULL && nb_queues != 0) {
		/* Allocate memory to store queue priority */
		dev->data->queues_prio = rte_zmalloc_socket(
				"eventdev->data->queues_prio",
				sizeof(dev->data->queues_prio[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->queues_prio == NULL) {
			dev->data->nb_queues = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for queue priority,"
					"nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
	/* Re-configure */
	} else if (dev->data->queues_prio != NULL && nb_queues != 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		/* Re allocate memory to store queue priority */
		queues_prio = dev->data->queues_prio;
		queues_prio = rte_realloc(queues_prio,
				sizeof(queues_prio[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (queues_prio == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc queue priority,"
						" nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
		dev->data->queues_prio = queues_prio;

		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_prio + old_nb_queues, 0,
				sizeof(queues_prio[0]) * new_qs);
		}
	} else if (dev->data->queues_prio != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}
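/*
 * Internal helper used by rte_event_dev_configure(): (re)allocates the
 * per-port metadata (port pointers, dequeue/enqueue depths and the
 * queue-to-port links_map) and releases driver ports that fall outside
 * the new port count. Not part of the public API.
 */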
static inline int
rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
	uint8_t old_nb_ports = dev->data->nb_ports;
	void **ports;
	uint16_t *links_map;
	uint8_t *ports_dequeue_depth;
	uint8_t *ports_enqueue_depth;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->ports == NULL && nb_ports != 0) {
		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
				sizeof(dev->data->ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store ports dequeue depth */
		dev->data->ports_dequeue_depth =
			rte_zmalloc_socket("eventdev->ports_dequeue_depth",
			sizeof(dev->data->ports_dequeue_depth[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports_dequeue_depth == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port deq meta,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store ports enqueue depth */
		dev->data->ports_enqueue_depth =
			rte_zmalloc_socket("eventdev->ports_enqueue_depth",
			sizeof(dev->data->ports_enqueue_depth[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports_enqueue_depth == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port enq meta,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store queue to port link connection */
		dev->data->links_map =
			rte_zmalloc_socket("eventdev->links_map",
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}
	} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		ports_dequeue_depth = dev->data->ports_dequeue_depth;
		ports_enqueue_depth = dev->data->ports_enqueue_depth;
		links_map = dev->data->links_map;

		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);

		/* Realloc memory for ports */
		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE);
		if (ports == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
						" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory for ports_dequeue_depth */
		ports_dequeue_depth = rte_realloc(ports_dequeue_depth,
			sizeof(ports_dequeue_depth[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE);
		if (ports_dequeue_depth == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port dequeue meta,"
						" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory for ports_enqueue_depth */
		ports_enqueue_depth = rte_realloc(ports_enqueue_depth,
			sizeof(ports_enqueue_depth[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE);
		if (ports_enqueue_depth == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port enqueue meta,"
						" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory to store queue to port link connection */
		links_map = rte_realloc(links_map,
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE);
		if (links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		if (nb_ports > old_nb_ports) {
			uint8_t new_ps = nb_ports - old_nb_ports;

			memset(ports + old_nb_ports, 0,
				sizeof(ports[0]) * new_ps);
			memset(ports_dequeue_depth + old_nb_ports, 0,
				sizeof(ports_dequeue_depth[0]) * new_ps);
			memset(ports_enqueue_depth + old_nb_ports, 0,
				sizeof(ports_enqueue_depth[0]) * new_ps);
			memset(links_map +
				(old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV),
				0, sizeof(links_map[0]) * new_ps *
				RTE_EVENT_MAX_QUEUES_PER_DEV);
		}

		dev->data->ports = ports;
		dev->data->ports_dequeue_depth = ports_dequeue_depth;
		dev->data->ports_enqueue_depth = ports_enqueue_depth;
		dev->data->links_map = links_map;
	} else if (dev->data->ports != NULL && nb_ports == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);
	}

	dev->data->nb_ports = nb_ports;
	return 0;
}
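/*
 * rte_event_dev_configure() below validates the fields of dev_conf against
 * the limits reported by the driver via dev_infos_get, then resizes the
 * queue/port metadata and finally calls the driver's dev_configure
 * callback. Any failure after the queue/port resize rolls the metadata
 * back to zero queues/ports.
 */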
int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_dev_info info;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check dequeue_timeout_ns value is in limit */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				info.max_dequeue_timeout_ns) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check nb_events_limit is in limit */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
		dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check nb_event_queues is in limit */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues) {
		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
		dev_id, dev_conf->nb_event_queues, info.max_event_queues);
		return -EINVAL;
	}

	/* Check nb_event_ports is in limit */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports= %d",
		dev_id, dev_conf->nb_event_ports, info.max_event_ports);
		return -EINVAL;
	}

	/* Check nb_event_queue_flows is in limit */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
		dev_id, dev_conf->nb_event_queue_flows,
		info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check nb_event_port_dequeue_depth is in limit */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_port_dequeue_depth >
			info.max_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
		dev_id, dev_conf->nb_event_port_dequeue_depth,
		info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check nb_event_port_enqueue_depth is in limit */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_port_enqueue_depth >
			info.max_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
		dev_id, dev_conf->nb_event_port_enqueue_depth,
		info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		rte_event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		rte_event_dev_queue_config(dev, 0);
		rte_event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	return diag;
}
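/*
 * Typical call sequence from an application, shown here as a minimal,
 * hypothetical sketch (device id 0, one queue, one port, driver defaults
 * where NULL is passed); it is not part of this file:
 *
 *	struct rte_event_dev_config conf;
 *	struct rte_event_dev_info info;
 *
 *	rte_event_dev_info_get(0, &info);
 *	memset(&conf, 0, sizeof(conf));
 *	conf.nb_event_queues = 1;
 *	conf.nb_event_ports = 1;
 *	conf.nb_events_limit = info.max_num_events;
 *	conf.nb_event_queue_flows = info.max_event_queue_flows;
 *	conf.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	conf.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	conf.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	rte_event_dev_configure(0, &conf);
 *	rte_event_queue_setup(0, 0, NULL);
 *	rte_event_port_setup(0, 0, NULL);
 *	rte_event_port_link(0, 0, NULL, NULL, 0);
 *	rte_event_dev_start(0);
 */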
static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
	return 0;
}
static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf && (
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY)
		))
		return 1;
	else
		return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf && (
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ORDERED_ONLY)
		))
		return 1;
	else
		return 0;
}
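/*
 * rte_event_queue_setup() accepts a NULL queue_conf, in which case the
 * driver's default queue configuration is used. For atomic and ordered
 * queue types, the flow and ordering sequence counts are validated
 * against the nb_event_queue_flows limit chosen at configure time.
 */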
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

	if (queue_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		def_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_DEFAULT;
		queue_conf = &def_conf;
	}

	dev->data->queues_prio[queue_id] = queue_conf->priority;
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
uint8_t
rte_event_queue_count(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	dev = &rte_eventdevs[dev_id];
	return dev->data->nb_queues;
}

uint8_t
rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id)
{
	struct rte_eventdev *dev;

	dev = &rte_eventdevs[dev_id];
	if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
		return dev->data->queues_prio[queue_id];
	else
		return RTE_EVENT_DEV_PRIORITY_NORMAL;
}
static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
	return 0;
}
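/*
 * rte_event_port_setup() likewise accepts a NULL port_conf and falls back
 * to the driver default. The threshold and depth fields are validated
 * against the limits chosen at configure time, and a freshly set up port
 * starts with no queues linked to it.
 */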
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
			dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
			dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

	if (port_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_dequeue_depth[port_id] =
			port_conf->dequeue_depth;
	dev->data->ports_enqueue_depth[port_id] =
			port_conf->enqueue_depth;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port(default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	if (diag < 0)
		return diag;

	return 0;
}
uint8_t
rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id)
{
	struct rte_eventdev *dev;

	dev = &rte_eventdevs[dev_id];
	return dev->data->ports_dequeue_depth[port_id];
}

uint8_t
rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id)
{
	struct rte_eventdev *dev;

	dev = &rte_eventdevs[dev_id];
	return dev->data->ports_enqueue_depth[port_id];
}

uint8_t
rte_event_port_count(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	dev = &rte_eventdevs[dev_id];
	return dev->data->nb_ports;
}
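/*
 * rte_event_port_link() resolves NULL arguments to "all configured queues"
 * and "normal priority", asks the driver to establish the links, and then
 * mirrors the result in links_map so rte_event_port_links_get() can report
 * them without a driver callback.
 */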
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct rte_eventdev *dev;
	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_link, -ENOTSUP);

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			queues_list[i] = i;
		queues = queues_list;
		nb_links = dev->data->nb_queues;
	}

	if (priorities == NULL) {
		for (i = 0; i < nb_links; i++)
			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
		priorities = priorities_list;
	}

	for (i = 0; i < nb_links; i++)
		if (queues[i] >= RTE_EVENT_MAX_QUEUES_PER_DEV)
			return -EINVAL;

	diag = (*dev->dev_ops->port_link)(dev->data->ports[port_id], queues,
						priorities, nb_links);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = (uint8_t)priorities[i];

	return diag;
}
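/*
 * Entries in links_map hold the link priority of a linked queue; the
 * sentinel below marks a queue that is not linked to the port. links_map
 * entries are 16 bits wide, so the 0xdead sentinel cannot collide with any
 * valid 8-bit priority value.
 */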
#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)

int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct rte_eventdev *dev;
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlink, -ENOTSUP);

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			all_queues[i] = i;
		queues = all_queues;
		nb_unlinks = dev->data->nb_queues;
	}

	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= RTE_EVENT_MAX_QUEUES_PER_DEV)
			return -EINVAL;

	diag = (*dev->dev_ops->port_unlink)(dev->data->ports[port_id], queues,
					nb_unlinks);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	return diag;
}
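/*
 * rte_event_port_links_get() walks the port's slice of links_map and
 * returns the number of linked queues, filling the caller-provided
 * queues[] and priorities[] arrays.
 */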
int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}
	return count;
}
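/*
 * rte_event_dequeue_timeout_ticks() converts a timeout in nanoseconds to
 * the device-specific tick value used when dequeuing events; the
 * conversion itself is delegated to the driver's timeout_ticks op.
 */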
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

	if (timeout_ticks == NULL)
		return -EINVAL;

	(*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
	return 0;
}
int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);

	(*dev->dev_ops->dump)(dev, f);
	return 0;
}
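/*
 * Device lifecycle: start and stop toggle dev_started and call into the
 * driver; close requires the device to be stopped first and returns the
 * driver's dev_close result.
 */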
int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}
void
rte_event_dev_stop(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}
int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	return (*dev->dev_ops->dev_close)(dev);
}