/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_ethdev.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];

static struct rte_eventdev_global eventdev_globals = {
	.nb_devs		= 0
};

struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;

/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
	return rte_eventdev_globals->nb_devs;
}

int
rte_event_dev_get_dev_id(const char *name)
{
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
		if ((strcmp(rte_event_devices[i].data->name, name)
				== 0) &&
				(rte_event_devices[i].attached ==
						RTE_EVENTDEV_ATTACHED))
			return i;
	return -ENODEV;
}
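
/*
 * Usage sketch (illustrative; "event_sw0" is an assumed vdev name, e.g.
 * one created with the --vdev=event_sw0 EAL option):
 *
 *	int id = rte_event_dev_get_dev_id("event_sw0");
 *
 *	if (id < 0)
 *		rte_panic("eventdev event_sw0 not found\n");
 */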

int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	return dev->data->socket_id;
}

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

	dev_info->dev = dev->dev;
	return 0;
}
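
/*
 * Usage sketch: the reported limits are the natural input for building a
 * device configuration (the queue/port counts below are assumptions):
 *
 *	struct rte_event_dev_info info;
 *
 *	if (rte_event_dev_info_get(dev_id, &info) == 0) {
 *		uint8_t nb_queues = RTE_MIN(2, info.max_event_queues);
 *		uint8_t nb_ports = RTE_MIN(2, info.max_event_ports);
 *	}
 */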

int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;
	*caps = 0;

	return dev->dev_ops->eth_rx_adapter_caps_get ?
				(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
						&rte_eth_devices[eth_port_id],
						caps)
				: 0;
}

static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	struct rte_event_queue_conf *queues_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->queues_cfg == NULL && nb_queues != 0) {
		/* Allocate memory to store queue configuration */
		dev->data->queues_cfg = rte_zmalloc_socket(
				"eventdev->data->queues_cfg",
				sizeof(dev->data->queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->queues_cfg == NULL) {
			dev->data->nb_queues = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
					" nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
	/* Re-configure */
	} else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		/* Reallocate memory to store queue configuration */
		queues_cfg = dev->data->queues_cfg;
		queues_cfg = rte_realloc(queues_cfg,
				sizeof(queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (queues_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
						" nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
		dev->data->queues_cfg = queues_cfg;

		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_cfg + old_nb_queues, 0,
				sizeof(queues_cfg[0]) * new_qs);
		}
	/* Tear down: release every previously configured queue */
	} else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}

#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)

static inline int
rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
	uint8_t old_nb_ports = dev->data->nb_ports;
	void **ports;
	uint16_t *links_map;
	struct rte_event_port_conf *ports_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->ports == NULL && nb_ports != 0) {
		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
				sizeof(dev->data->ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
					" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store port configurations */
		dev->data->ports_cfg =
			rte_zmalloc_socket("eventdev->ports_cfg",
			sizeof(dev->data->ports_cfg[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports_cfg == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
					" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store queue to port link connection */
		dev->data->links_map =
			rte_zmalloc_socket("eventdev->links_map",
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
					" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}
		for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
			dev->data->links_map[i] =
				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
	} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		ports_cfg = dev->data->ports_cfg;
		links_map = dev->data->links_map;

		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);

		/* Realloc memory for ports */
		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE);
		if (ports == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
						" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory for ports_cfg */
		ports_cfg = rte_realloc(ports_cfg,
			sizeof(ports_cfg[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE);
		if (ports_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
						" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory to store queue to port link connection */
		links_map = rte_realloc(links_map,
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE);
		if (links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
					" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		if (nb_ports > old_nb_ports) {
			uint8_t new_ps = nb_ports - old_nb_ports;
			unsigned int old_links_map_end =
				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
			unsigned int links_map_end =
				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

			memset(ports + old_nb_ports, 0,
				sizeof(ports[0]) * new_ps);
			memset(ports_cfg + old_nb_ports, 0,
				sizeof(ports_cfg[0]) * new_ps);
			for (i = old_links_map_end; i < links_map_end; i++)
				links_map[i] =
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
		}

		dev->data->ports = ports;
		dev->data->ports_cfg = ports_cfg;
		dev->data->links_map = links_map;
	} else if (dev->data->ports != NULL && nb_ports == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);
	}

	dev->data->nb_ports = nb_ports;
	return 0;
}

int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_dev_info info;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check dequeue_timeout_ns value is in limit */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns &&
		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				info.max_dequeue_timeout_ns)) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check nb_events_limit is in limit */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
		dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check nb_event_queues is in limit */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues) {
		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
		dev_id, dev_conf->nb_event_queues, info.max_event_queues);
		return -EINVAL;
	}

	/* Check nb_event_ports is in limit */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d",
		dev_id, dev_conf->nb_event_ports, info.max_event_ports);
		return -EINVAL;
	}

	/* Check nb_event_queue_flows is in limit */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
		dev_id, dev_conf->nb_event_queue_flows,
		info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check nb_event_port_dequeue_depth is in limit */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_dequeue_depth >
			info.max_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
		dev_id, dev_conf->nb_event_port_dequeue_depth,
		info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check nb_event_port_enqueue_depth is in limit */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_enqueue_depth >
			info.max_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
		dev_id, dev_conf->nb_event_port_enqueue_depth,
		info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		rte_event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		rte_event_dev_queue_config(dev, 0);
		rte_event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	return diag;
}
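
/*
 * Usage sketch: a minimal single-device configuration derived from the
 * limits reported by rte_event_dev_info_get() (the queue/port counts are
 * assumptions):
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	cfg.nb_event_queues = 2;
 *	cfg.nb_event_ports = 2;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *
 *	if (rte_event_dev_configure(dev_id, &cfg) < 0)
 *		rte_panic("eventdev configure failed\n");
 */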

static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
	return 0;
}

static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ATOMIC)
		))
		return 1;
	else
		return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ORDERED)
		))
		return 1;
	else
		return 0;
}

int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

	if (queue_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	dev->data->queues_cfg[queue_id] = *queue_conf;
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
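
/*
 * Usage sketch: one queue with driver defaults and one explicit atomic
 * queue (the queue ids and flow count are assumptions):
 *
 *	struct rte_event_queue_conf qconf = {
 *		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		.nb_atomic_flows = 1024,
 *	};
 *
 *	rte_event_queue_setup(dev_id, 0, NULL);	(NULL selects defaults)
 *	rte_event_queue_setup(dev_id, 1, &qconf);
 */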

static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
	return 0;
}

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
			dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
			dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

	if (port_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_cfg[port_id] = *port_conf;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port (default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	if (diag < 0)
		return diag;

	return 0;
}
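
/*
 * Usage sketch (the depths and threshold are assumptions; each must stay
 * within the corresponding limit set at rte_event_dev_configure() time):
 *
 *	struct rte_event_port_conf pconf = {
 *		.new_event_threshold = 1024,
 *		.dequeue_depth = 8,
 *		.enqueue_depth = 8,
 *	};
 *
 *	rte_event_port_setup(dev_id, 0, &pconf);
 *	rte_event_port_setup(dev_id, 1, NULL);	(NULL selects defaults)
 */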

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	switch (attr_id) {
	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
		*attr_value = dev->data->nb_ports;
		break;
	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
		*attr_value = dev->data->nb_queues;
		break;
	case RTE_EVENT_DEV_ATTR_STARTED:
		*attr_value = dev->data->dev_started;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	switch (attr_id) {
	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_event_queue_conf *conf;
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	conf = &dev->data->queues_cfg[queue_id];

	switch (attr_id) {
	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->priority;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
		*attr_value = conf->nb_atomic_flows;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
		*attr_value = conf->nb_atomic_order_sequences;
		break;
	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
		*attr_value = conf->event_queue_cfg;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
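
/*
 * Usage sketch:
 *
 *	uint32_t prio;
 *
 *	if (rte_event_queue_attr_get(dev_id, 0,
 *			RTE_EVENT_QUEUE_ATTR_PRIORITY, &prio) == 0)
 *		printf("queue 0 priority %u\n", prio);
 */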

int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct rte_eventdev *dev;
	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_link, -ENOTSUP);

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			queues_list[i] = i;

		queues = queues_list;
		nb_links = dev->data->nb_queues;
	}

	if (priorities == NULL) {
		for (i = 0; i < nb_links; i++)
			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

		priorities = priorities_list;
	}

	for (i = 0; i < nb_links; i++)
		if (queues[i] >= dev->data->nb_queues)
			return -EINVAL;

	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
						queues, priorities, nb_links);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = (uint8_t)priorities[i];

	return diag;
}
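
/*
 * Usage sketch: link port 0 to queues 0 and 1; passing NULL for both
 * arrays instead links every configured queue at normal priority:
 *
 *	const uint8_t q[] = { 0, 1 };
 *	const uint8_t prio[] = { RTE_EVENT_DEV_PRIORITY_NORMAL,
 *				 RTE_EVENT_DEV_PRIORITY_NORMAL };
 *
 *	if (rte_event_port_link(dev_id, 0, q, prio, 2) != 2)
 *		rte_panic("port link failed\n");
 */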

int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct rte_eventdev *dev;
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlink, -ENOTSUP);

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			all_queues[i] = i;
		queues = all_queues;
		nb_unlinks = dev->data->nb_queues;
	}

	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= dev->data->nb_queues)
			return -EINVAL;

	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
					queues, nb_unlinks);

	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	return diag;
}

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}
	return count;
}

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

	if (timeout_ticks == NULL)
		return -EINVAL;

	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}
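
/*
 * Usage sketch: convert a 100 us timeout into the device-specific tick
 * value expected by rte_event_dequeue_burst():
 *
 *	uint64_t ticks = 0;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) < 0)
 *		ticks = 0;	(fall back to a non-blocking dequeue)
 */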

int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);

	(*dev->dev_ops->dump)(dev, f);
	return 0;
}

static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
							queue_port_id,
							NULL, NULL, 0);
	return 0;
}

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
							  queue_port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* dev_id checked above */
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
				queue_port_id, xstats_names, ids, size);

	return -ENOTSUP;
}

/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
				ids, values, n);
	return -ENOTSUP;
}

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		unsigned int *id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	unsigned int temp = -1;

	if (id != NULL)
		*id = (unsigned int)-1;
	else
		id = &temp; /* ensure driver never gets a NULL value */

	/* implemented by driver */
	if (dev->dev_ops->xstats_get_by_name != NULL)
		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
	return -ENOTSUP;
}
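
/*
 * Usage sketch ("dev_rx" is a hypothetical driver-specific counter name;
 * real names must be discovered via rte_event_dev_xstats_names_get()):
 *
 *	unsigned int xid;
 *	uint64_t val = rte_event_dev_xstats_by_name_get(dev_id,
 *							"dev_rx", &xid);
 */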

int rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_reset != NULL)
		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
							ids, nb_ids);
	return -ENOTSUP;
}

int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_event_dev_stop(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}

int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	return (*dev->dev_ops->dev_close)(dev);
}
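
/*
 * Usage sketch: teardown must stop the device before closing it:
 *
 *	rte_event_dev_stop(dev_id);
 *	if (rte_event_dev_close(dev_id) < 0)
 *		printf("failed to close eventdev %u\n", dev_id);
 */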

static inline int
rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_eventdev_data));

	return 0;
}

static inline uint8_t
rte_eventdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_DETACHED)
			return dev_id;
	}
	return RTE_EVENT_MAX_DEVS;
}

struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;
	uint8_t dev_id;

	if (rte_event_pmd_get_named_dev(name) != NULL) {
		RTE_EDEV_LOG_ERR("Event device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_eventdev_find_free_device_index();
	if (dev_id == RTE_EVENT_MAX_DEVS) {
		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
		return NULL;
	}

	eventdev = &rte_eventdevs[dev_id];

	if (eventdev->data == NULL) {
		struct rte_eventdev_data *eventdev_data = NULL;

		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
				socket_id);

		if (retval < 0 || eventdev_data == NULL)
			return NULL;

		eventdev->data = eventdev_data;

		snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
				"%s", name);

		eventdev->data->dev_id = dev_id;
		eventdev->data->socket_id = socket_id;
		eventdev->data->dev_started = 0;

		eventdev->attached = RTE_EVENTDEV_ATTACHED;

		eventdev_globals.nb_devs++;
	}

	return eventdev;
}

int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
				eventdev->data->dev_id);
		if (ret >= (int)sizeof(mz_name))
			return -EINVAL;

		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL)
			return -ENOMEM;

		ret = rte_memzone_free(mz);
		if (ret)
			return ret;
	}

	eventdev->data = NULL;
	return 0;
}