/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Cavium networks. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];

static struct rte_eventdev_global eventdev_globals = {
	.nb_devs		= 0
};

struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;

/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
	return rte_eventdev_globals->nb_devs;
}
int
rte_event_dev_get_dev_id(const char *name)
{
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
		if ((strcmp(rte_event_devices[i].data->name, name)
				== 0) &&
				(rte_event_devices[i].attached ==
						RTE_EVENTDEV_ATTACHED))
			return i;

	return -ENODEV;
}
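
/*
 * Minimal device-discovery sketch (illustrative, not part of this file;
 * the PMD name "event_sw0" is a hypothetical example):
 *
 *	uint8_t nb_devs = rte_event_dev_count();
 *	int dev_id = rte_event_dev_get_dev_id("event_sw0");
 *	if (dev_id < 0)
 *		printf("device not found, %u devices present\n", nb_devs);
 */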
int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	return dev->data->socket_id;
}

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

	dev_info->pci_dev = dev->pci_dev;
	/* Virtual devices have no PCI driver; guard the dereference */
	if (dev->driver)
		dev_info->driver_name = dev->driver->pci_drv.driver.name;
	return 0;
}
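
/*
 * Minimal usage sketch (illustrative, not part of this file):
 *
 *	struct rte_event_dev_info info;
 *	if (rte_event_dev_info_get(dev_id, &info) == 0)
 *		printf("%s: max queues %u, max ports %u\n",
 *			info.driver_name, info.max_event_queues,
 *			info.max_event_ports);
 */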
static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	uint8_t *queues_prio;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->queues_prio == NULL && nb_queues != 0) {
		/* Allocate memory to store queue priority */
		dev->data->queues_prio = rte_zmalloc_socket(
				"eventdev->data->queues_prio",
				sizeof(dev->data->queues_prio[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->queues_prio == NULL) {
			dev->data->nb_queues = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for queue priority,"
					" nb_queues %u", nb_queues);
			return -ENOMEM;
		}
	/* Re-configure */
	} else if (dev->data->queues_prio != NULL && nb_queues != 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		/* Release the queues that are no longer needed */
		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		/* Re-allocate memory to store queue priority */
		queues_prio = dev->data->queues_prio;
		queues_prio = rte_realloc(queues_prio,
				sizeof(queues_prio[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (queues_prio == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc queue priority,"
					" nb_queues %u", nb_queues);
			return -ENOMEM;
		}
		dev->data->queues_prio = queues_prio;

		/* Zero the priority of any newly added queues */
		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_prio + old_nb_queues, 0,
				sizeof(queues_prio[0]) * new_qs);
		}
	/* Tear down all queues */
	} else if (dev->data->queues_prio != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}
static inline int
rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
	uint8_t old_nb_ports = dev->data->nb_ports;
	void **ports;
	uint16_t *links_map;
	uint8_t *ports_dequeue_depth;
	uint8_t *ports_enqueue_depth;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->ports == NULL && nb_ports != 0) {
		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
				sizeof(dev->data->ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
					" nb_ports %u", nb_ports);
			return -ENOMEM;
		}

		/* Allocate memory to store ports dequeue depth */
		dev->data->ports_dequeue_depth =
			rte_zmalloc_socket("eventdev->ports_dequeue_depth",
			sizeof(dev->data->ports_dequeue_depth[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports_dequeue_depth == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port deq meta,"
					" nb_ports %u", nb_ports);
			return -ENOMEM;
		}

		/* Allocate memory to store ports enqueue depth */
		dev->data->ports_enqueue_depth =
			rte_zmalloc_socket("eventdev->ports_enqueue_depth",
			sizeof(dev->data->ports_enqueue_depth[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports_enqueue_depth == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port enq meta,"
					" nb_ports %u", nb_ports);
			return -ENOMEM;
		}

		/* Allocate memory to store queue to port link connection */
		dev->data->links_map =
			rte_zmalloc_socket("eventdev->links_map",
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
					" nb_ports %u", nb_ports);
			return -ENOMEM;
		}
	} else if (dev->data->ports != NULL && nb_ports != 0) { /* re-config */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		ports_dequeue_depth = dev->data->ports_dequeue_depth;
		ports_enqueue_depth = dev->data->ports_enqueue_depth;
		links_map = dev->data->links_map;

		/* Release the ports that are no longer needed */
		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);

		/* Realloc memory for ports */
		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE);
		if (ports == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
					" nb_ports %u", nb_ports);
			return -ENOMEM;
		}

		/* Realloc memory for ports_dequeue_depth */
		ports_dequeue_depth = rte_realloc(ports_dequeue_depth,
			sizeof(ports_dequeue_depth[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE);
		if (ports_dequeue_depth == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port dequeue meta,"
					" nb_ports %u", nb_ports);
			return -ENOMEM;
		}

		/* Realloc memory for ports_enqueue_depth */
		ports_enqueue_depth = rte_realloc(ports_enqueue_depth,
			sizeof(ports_enqueue_depth[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE);
		if (ports_enqueue_depth == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port enqueue meta,"
					" nb_ports %u", nb_ports);
			return -ENOMEM;
		}

		/* Realloc memory to store queue to port link connection */
		links_map = rte_realloc(links_map,
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE);
		/* Check the realloc result, not the stale pointer */
		if (links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
					" nb_ports %u", nb_ports);
			return -ENOMEM;
		}

		/* Zero the metadata of any newly added ports */
		if (nb_ports > old_nb_ports) {
			uint8_t new_ps = nb_ports - old_nb_ports;

			memset(ports + old_nb_ports, 0,
				sizeof(ports[0]) * new_ps);
			memset(ports_dequeue_depth + old_nb_ports, 0,
				sizeof(ports_dequeue_depth[0]) * new_ps);
			memset(ports_enqueue_depth + old_nb_ports, 0,
				sizeof(ports_enqueue_depth[0]) * new_ps);
			/* Each port owns RTE_EVENT_MAX_QUEUES_PER_DEV
			 * links_map entries, so clear all of them.
			 */
			memset(links_map +
				(old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV),
				0, sizeof(links_map[0]) * new_ps *
				RTE_EVENT_MAX_QUEUES_PER_DEV);
		}

		dev->data->ports = ports;
		dev->data->ports_dequeue_depth = ports_dequeue_depth;
		dev->data->ports_enqueue_depth = ports_enqueue_depth;
		dev->data->links_map = links_map;
	} else if (dev->data->ports != NULL && nb_ports == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);
	}

	dev->data->nb_ports = nb_ports;
	return 0;
}
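
/*
 * Layout note (derived from the allocations above): links_map is a flat
 * array of nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV uint16_t entries,
 * where each port owns a contiguous slice indexed by queue id:
 *
 *	links_map[port_id * RTE_EVENT_MAX_QUEUES_PER_DEV + queue_id]
 *
 * Entries start zeroed, are set to the link priority by
 * rte_event_port_link(), and to EVENT_QUEUE_SERVICE_PRIORITY_INVALID
 * (0xdead) by rte_event_port_unlink().
 */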
int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_dev_info info;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check dequeue_timeout_ns value is in limit */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				info.max_dequeue_timeout_ns) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check nb_events_limit is in limit */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
		dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check nb_event_queues is in limit */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d",
		dev_id, dev_conf->nb_event_queues, info.max_event_queues);
		return -EINVAL;
	}

	/* Check nb_event_ports is in limit */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d",
		dev_id, dev_conf->nb_event_ports, info.max_event_ports);
		return -EINVAL;
	}

	/* Check nb_event_queue_flows is in limit */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
		dev_id, dev_conf->nb_event_queue_flows,
		info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check nb_event_port_dequeue_depth is in limit */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_port_dequeue_depth >
			info.max_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
		dev_id, dev_conf->nb_event_port_dequeue_depth,
		info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check nb_event_port_enqueue_depth is in limit */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_port_enqueue_depth >
			info.max_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
		dev_id, dev_conf->nb_event_port_enqueue_depth,
		info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		rte_event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		rte_event_dev_queue_config(dev, 0);
		rte_event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	return diag;
}
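
/*
 * Minimal configuration sketch (illustrative, not part of this file).
 * The queue/port counts are hypothetical; real limits come from
 * rte_event_dev_info_get():
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg;
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.nb_event_queues = 4;
 *	cfg.nb_event_ports = 2;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	if (rte_event_dev_configure(dev_id, &cfg) < 0)
 *		rte_panic("dev%d configure failed\n", dev_id);
 */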
static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
	return 0;
}

static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf && (
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY)
		))
		return 1;
	else
		return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf && (
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ORDERED_ONLY)
		))
		return 1;
	else
		return 0;
}
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

	if (queue_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		def_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_DEFAULT;
		queue_conf = &def_conf;
	}

	dev->data->queues_prio[queue_id] = queue_conf->priority;
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
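
/*
 * Minimal queue setup sketch (illustrative, not part of this file):
 * passing NULL for queue_conf selects the driver default configuration.
 *
 *	uint8_t q;
 *	for (q = 0; q < rte_event_queue_count(dev_id); q++)
 *		if (rte_event_queue_setup(dev_id, q, NULL) < 0)
 *			rte_panic("queue%d setup failed\n", q);
 */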
uint8_t
rte_event_queue_count(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	dev = &rte_eventdevs[dev_id];
	return dev->data->nb_queues;
}

uint8_t
rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id)
{
	struct rte_eventdev *dev;

	dev = &rte_eventdevs[dev_id];
	if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
		return dev->data->queues_prio[queue_id];
	else
		return RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}
int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
	return 0;
}
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		"dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
			dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		"dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
			dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		"dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

	if (port_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_dequeue_depth[port_id] =
			port_conf->dequeue_depth;
	dev->data->ports_enqueue_depth[port_id] =
			port_conf->enqueue_depth;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port(default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	if (diag < 0)
		return diag;

	return 0;
}
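
/*
 * Minimal port setup sketch (illustrative, not part of this file):
 * NULL port_conf selects the driver default configuration, and the
 * port comes up with no queues linked.
 *
 *	uint8_t p;
 *	for (p = 0; p < rte_event_port_count(dev_id); p++)
 *		if (rte_event_port_setup(dev_id, p, NULL) < 0)
 *			rte_panic("port%d setup failed\n", p);
 */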
uint8_t
rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id)
{
	struct rte_eventdev *dev;

	dev = &rte_eventdevs[dev_id];
	return dev->data->ports_dequeue_depth[port_id];
}

uint8_t
rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id)
{
	struct rte_eventdev *dev;

	dev = &rte_eventdevs[dev_id];
	return dev->data->ports_enqueue_depth[port_id];
}

uint8_t
rte_event_port_count(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	dev = &rte_eventdevs[dev_id];
	return dev->data->nb_ports;
}
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct rte_eventdev *dev;
	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_link, -ENOTSUP);

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* NULL queues means link all the configured queues */
	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			queues_list[i] = i;

		queues = queues_list;
		nb_links = dev->data->nb_queues;
	}

	/* NULL priorities means normal priority for every link */
	if (priorities == NULL) {
		for (i = 0; i < nb_links; i++)
			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

		priorities = priorities_list;
	}

	for (i = 0; i < nb_links; i++)
		if (queues[i] >= RTE_EVENT_MAX_QUEUES_PER_DEV)
			return -EINVAL;

	diag = (*dev->dev_ops->port_link)(dev->data->ports[port_id], queues,
						priorities, nb_links);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = (uint8_t)priorities[i];

	return diag;
}
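
/*
 * Minimal link sketch (illustrative, not part of this file): link port 0
 * to all configured queues at normal priority by passing NULL for both
 * the queue and priority arrays.
 *
 *	int nb = rte_event_port_link(dev_id, 0, NULL, NULL, 0);
 *	if (nb < 0)
 *		rte_panic("port link failed\n");
 */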
#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct rte_eventdev *dev;
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlink, -ENOTSUP);

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* NULL queues means unlink all the configured queues */
	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			all_queues[i] = i;

		queues = all_queues;
		nb_unlinks = dev->data->nb_queues;
	}

	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= RTE_EVENT_MAX_QUEUES_PER_DEV)
			return -EINVAL;

	diag = (*dev->dev_ops->port_unlink)(dev->data->ports[port_id], queues,
					nb_unlinks);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	return diag;
}
int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}
	return count;
}
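
/*
 * Minimal sketch (illustrative, not part of this file): the caller must
 * size both arrays for the worst case of RTE_EVENT_MAX_QUEUES_PER_DEV
 * links.
 *
 *	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
 *	uint8_t prios[RTE_EVENT_MAX_QUEUES_PER_DEV];
 *	int n = rte_event_port_links_get(dev_id, 0, queues, prios);
 */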
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

	if (timeout_ticks == NULL)
		return -EINVAL;

	(*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
	return 0;
}
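
/*
 * Minimal sketch (illustrative, not part of this file): convert a 100us
 * timeout into the device-specific tick value that the dequeue path
 * expects.
 *
 *	uint64_t ticks;
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100000, &ticks) < 0)
 *		ticks = 0;
 */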
int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);

	(*dev->dev_ops->dump)(dev, f);
	return 0;
}
int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_event_dev_stop(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}
int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	return (*dev->dev_ops->dev_close)(dev);
}
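
/*
 * Typical lifecycle sketch (illustrative, not part of this file):
 *
 *	rte_event_dev_start(dev_id);
 *	... enqueue/dequeue events via the configured ports ...
 *	rte_event_dev_stop(dev_id);
 *	rte_event_dev_close(dev_id);
 */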
static inline int
rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	/* The primary process reserves the memzone; secondaries look it up */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_eventdev_data));

	return 0;
}

static inline uint8_t
rte_eventdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_DETACHED)
			return dev_id;
	}
	return RTE_EVENT_MAX_DEVS;
}
struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;
	uint8_t dev_id;

	if (rte_event_pmd_get_named_dev(name) != NULL) {
		RTE_EDEV_LOG_ERR("Event device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_eventdev_find_free_device_index();
	if (dev_id == RTE_EVENT_MAX_DEVS) {
		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
		return NULL;
	}

	eventdev = &rte_eventdevs[dev_id];

	if (eventdev->data == NULL) {
		struct rte_eventdev_data *eventdev_data = NULL;

		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
				socket_id);

		if (retval < 0 || eventdev_data == NULL)
			return NULL;

		eventdev->data = eventdev_data;

		snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
				"%s", name);

		eventdev->data->dev_id = dev_id;
		eventdev->data->socket_id = socket_id;
		eventdev->data->dev_started = 0;

		eventdev->attached = RTE_EVENTDEV_ATTACHED;

		eventdev_globals.nb_devs++;
	}

	return eventdev;
}

int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;

	if (eventdev == NULL)
		return -EINVAL;

	ret = rte_event_dev_close(eventdev->data->dev_id);
	if (ret < 0)
		return ret;

	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;
	eventdev->data = NULL;
	return 0;
}
struct rte_eventdev *
rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
		int socket_id)
{
	struct rte_eventdev *eventdev;

	/* Allocate device structure */
	eventdev = rte_event_pmd_allocate(name, socket_id);
	if (eventdev == NULL)
		return NULL;

	/* Allocate private device structure */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eventdev->data->dev_private =
				rte_zmalloc_socket("eventdev device private",
						dev_private_size,
						RTE_CACHE_LINE_SIZE,
						socket_id);

		if (eventdev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private device"
					" data");
	}

	return eventdev;
}
int
rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
			struct rte_pci_device *pci_dev)
{
	struct rte_eventdev_driver *eventdrv;
	struct rte_eventdev *eventdev;

	char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];

	int retval;

	eventdrv = (struct rte_eventdev_driver *)pci_drv;
	if (eventdrv == NULL)
		return -ENODEV;

	rte_eal_pci_device_name(&pci_dev->addr, eventdev_name,
			sizeof(eventdev_name));

	eventdev = rte_event_pmd_allocate(eventdev_name,
			 pci_dev->device.numa_node);
	if (eventdev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eventdev->data->dev_private =
				rte_zmalloc_socket(
						"eventdev private structure",
						eventdrv->dev_private_size,
						RTE_CACHE_LINE_SIZE,
						rte_socket_id());

		if (eventdev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	eventdev->pci_dev = pci_dev;
	eventdev->driver = eventdrv;

	/* Invoke PMD device initialization function */
	retval = (*eventdrv->eventdev_init)(eventdev);
	if (retval == 0)
		return 0;

	RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
			" failed", pci_drv->driver.name,
			(unsigned int) pci_dev->id.vendor_id,
			(unsigned int) pci_dev->id.device_id);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eventdev->data->dev_private);

	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	return -ENXIO;
}
int
rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev)
{
	const struct rte_eventdev_driver *eventdrv;
	struct rte_eventdev *eventdev;
	char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
	int ret;

	if (pci_dev == NULL)
		return -EINVAL;

	rte_eal_pci_device_name(&pci_dev->addr, eventdev_name,
			sizeof(eventdev_name));

	eventdev = rte_event_pmd_get_named_dev(eventdev_name);
	if (eventdev == NULL)
		return -ENODEV;

	eventdrv = (const struct rte_eventdev_driver *)pci_dev->driver;
	if (eventdrv == NULL)
		return -ENODEV;

	/* Invoke PMD device un-init function */
	if (*eventdrv->eventdev_uninit) {
		ret = (*eventdrv->eventdev_uninit)(eventdev);
		if (ret)
			return ret;
	}

	/* Free the private data before releasing the device, as
	 * rte_event_pmd_release() clears eventdev->data.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eventdev->data->dev_private);

	/* Free event device */
	rte_event_pmd_release(eventdev);

	eventdev->pci_dev = NULL;
	eventdev->driver = NULL;

	return 0;
}