/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_ethdev.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];

static struct rte_eventdev_global eventdev_globals = {
	.nb_devs		= 0
};

struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;

/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
	return rte_eventdev_globals->nb_devs;
}
int
rte_event_dev_get_dev_id(const char *name)
{
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
		if ((strcmp(rte_event_devices[i].data->name, name)
				== 0) &&
				(rte_event_devices[i].attached ==
						RTE_EVENTDEV_ATTACHED))
			return i;

	return -ENODEV;
}
int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	return dev->data->socket_id;
}
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

	dev_info->dev = dev->dev;
	return 0;
}
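
/*
 * Illustration only, not part of the library: a caller typically queries the
 * device limits before configuring it. The variable names below (dev_id,
 * info) are hypothetical.
 *
 *	struct rte_event_dev_info info;
 *
 *	if (rte_event_dev_info_get(dev_id, &info) == 0)
 *		printf("eventdev %u: max queues %u, max ports %u\n",
 *			dev_id, info.max_event_queues, info.max_event_ports);
 */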
int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;
	*caps = 0;

	return dev->dev_ops->eth_rx_adapter_caps_get ?
				(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
						&rte_eth_devices[eth_port_id],
						caps)
				: 0;
}
static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	struct rte_event_queue_conf *queues_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->queues_cfg == NULL && nb_queues != 0) {
		/* Allocate memory to store queue configuration */
		dev->data->queues_cfg = rte_zmalloc_socket(
				"eventdev->data->queues_cfg",
				sizeof(dev->data->queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->queues_cfg == NULL) {
			dev->data->nb_queues = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
					" nb_queues %u", nb_queues);
			return -ENOMEM;
		}
	/* Re-configure */
	} else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		/* Reallocate memory to store queue configuration */
		queues_cfg = dev->data->queues_cfg;
		queues_cfg = rte_realloc(queues_cfg,
				sizeof(queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (queues_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
						" nb_queues %u", nb_queues);
			return -ENOMEM;
		}
		dev->data->queues_cfg = queues_cfg;

		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_cfg + old_nb_queues, 0,
				sizeof(queues_cfg[0]) * new_qs);
		}
	/* Tear down */
	} else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}
#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
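
/*
 * dev->data->links_map holds one slot per (port, queue) pair, with
 * RTE_EVENT_MAX_QUEUES_PER_DEV slots reserved per port. A slot stores the
 * service priority of a linked queue, or the marker above when that queue is
 * not linked to the port; see rte_event_port_link(), rte_event_port_unlink()
 * and rte_event_port_links_get() below.
 */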
static inline int
rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
	uint8_t old_nb_ports = dev->data->nb_ports;
	void **ports;
	uint16_t *links_map;
	struct rte_event_port_conf *ports_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->ports == NULL && nb_ports != 0) {
		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
				sizeof(dev->data->ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
					" nb_ports %u", nb_ports);
			return -ENOMEM;
		}

		/* Allocate memory to store port configurations */
		dev->data->ports_cfg =
			rte_zmalloc_socket("eventdev->ports_cfg",
			sizeof(dev->data->ports_cfg[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports_cfg == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
					" nb_ports %u", nb_ports);
			return -ENOMEM;
		}

		/* Allocate memory to store queue to port link connection */
		dev->data->links_map =
			rte_zmalloc_socket("eventdev->links_map",
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
					" nb_ports %u", nb_ports);
			return -ENOMEM;
		}
		for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
			dev->data->links_map[i] =
				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
	} else if (dev->data->ports != NULL && nb_ports != 0) { /* re-config */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		ports_cfg = dev->data->ports_cfg;
		links_map = dev->data->links_map;

		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);

		/* Realloc memory for ports */
		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE);
		if (ports == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
						" nb_ports %u", nb_ports);
			return -ENOMEM;
		}

		/* Realloc memory for ports_cfg */
		ports_cfg = rte_realloc(ports_cfg,
			sizeof(ports_cfg[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE);
		if (ports_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
						" nb_ports %u", nb_ports);
			return -ENOMEM;
		}

		/* Realloc memory to store queue to port link connection */
		links_map = rte_realloc(links_map,
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE);
		if (links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
					" nb_ports %u", nb_ports);
			return -ENOMEM;
		}

		if (nb_ports > old_nb_ports) {
			uint8_t new_ps = nb_ports - old_nb_ports;
			unsigned int old_links_map_end =
				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
			unsigned int links_map_end =
				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

			memset(ports + old_nb_ports, 0,
				sizeof(ports[0]) * new_ps);
			memset(ports_cfg + old_nb_ports, 0,
				sizeof(ports_cfg[0]) * new_ps);
			for (i = old_links_map_end; i < links_map_end; i++)
				links_map[i] =
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
		}

		dev->data->ports = ports;
		dev->data->ports_cfg = ports_cfg;
		dev->data->links_map = links_map;
	} else if (dev->data->ports != NULL && nb_ports == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);
	}

	dev->data->nb_ports = nb_ports;
	return 0;
}
int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_dev_info info;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check dequeue_timeout_ns value is in limit */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns &&
		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				info.max_dequeue_timeout_ns)) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check nb_events_limit is in limit */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
		dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check nb_event_queues is in limit */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d",
		dev_id, dev_conf->nb_event_queues, info.max_event_queues);
		return -EINVAL;
	}

	/* Check nb_event_ports is in limit */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d",
		dev_id, dev_conf->nb_event_ports, info.max_event_ports);
		return -EINVAL;
	}

	/* Check nb_event_queue_flows is in limit */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
		dev_id, dev_conf->nb_event_queue_flows,
		info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check nb_event_port_dequeue_depth is in limit */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_dequeue_depth >
			info.max_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
		dev_id, dev_conf->nb_event_port_dequeue_depth,
		info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check nb_event_port_enqueue_depth is in limit */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_enqueue_depth >
			info.max_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
		dev_id, dev_conf->nb_event_port_enqueue_depth,
		info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		rte_event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		rte_event_dev_queue_config(dev, 0);
		rte_event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	return diag;
}
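
/*
 * Illustration only, not part of the library: a minimal configuration
 * sequence an application might follow, assuming one queue and one port.
 * The variable names are hypothetical; the fields mirror the limits checked
 * above.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config config = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	config.nb_event_queues = 1;
 *	config.nb_event_ports = 1;
 *	config.nb_events_limit = info.max_num_events;
 *	config.nb_event_queue_flows = info.max_event_queue_flows;
 *	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	config.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *
 *	if (rte_event_dev_configure(dev_id, &config) < 0)
 *		rte_panic("eventdev %u configure failed\n", dev_id);
 */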
static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}
int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
	return 0;
}
static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) && (
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY)
		))
		return 1;
	else
		return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) && (
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ORDERED_ONLY)
		))
		return 1;
	else
		return 0;
}
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

	if (queue_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	dev->data->queues_cfg[queue_id] = *queue_conf;
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
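
/*
 * Illustration only, not part of the library: setting up queue 0 with an
 * explicit configuration; the flow and sequence counts shown are arbitrary
 * example values. Passing queue_conf == NULL instead uses the driver default
 * from queue_def_conf(), as handled above.
 *
 *	struct rte_event_queue_conf qconf = {0};
 *
 *	qconf.nb_atomic_flows = 1024;
 *	qconf.nb_atomic_order_sequences = 1024;
 *	qconf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *
 *	if (rte_event_queue_setup(dev_id, 0, &qconf) < 0)
 *		rte_panic("queue 0 setup failed\n");
 */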
static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}
int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
	return 0;
}
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
			dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
			dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

	if (port_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_cfg[port_id] = *port_conf;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port (default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	if (diag < 0)
		return diag;

	return 0;
}
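
/*
 * Illustration only, not part of the library: passing port_conf == NULL lets
 * the driver's port_def_conf() supply the defaults, as handled above.
 *
 *	if (rte_event_port_setup(dev_id, 0, NULL) < 0)
 *		rte_panic("port 0 setup failed\n");
 */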
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	switch (attr_id) {
	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
		*attr_value = dev->data->nb_ports;
		break;
	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
		*attr_value = dev->data->nb_queues;
		break;
	case RTE_EVENT_DEV_ATTR_STARTED:
		*attr_value = dev->data->dev_started;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	switch (attr_id) {
	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_event_queue_conf *conf;
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	conf = &dev->data->queues_cfg[queue_id];

	switch (attr_id) {
	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->priority;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
		*attr_value = conf->nb_atomic_flows;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
		*attr_value = conf->nb_atomic_order_sequences;
		break;
	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
		*attr_value = conf->event_queue_cfg;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct rte_eventdev *dev;
	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_link, -ENOTSUP);

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			queues_list[i] = i;

		queues = queues_list;
		nb_links = dev->data->nb_queues;
	}

	if (priorities == NULL) {
		for (i = 0; i < nb_links; i++)
			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

		priorities = priorities_list;
	}

	for (i = 0; i < nb_links; i++)
		if (queues[i] >= dev->data->nb_queues)
			return -EINVAL;

	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
						queues, priorities, nb_links);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = (uint8_t)priorities[i];

	return diag;
}
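
/*
 * Illustration only, not part of the library: linking port 0 to every
 * configured queue at normal priority. Passing NULL for both arrays
 * triggers the "link all queues" and "normal priority" fallbacks
 * implemented above.
 *
 *	int nb = rte_event_port_link(dev_id, 0, NULL, NULL, 0);
 *
 *	if (nb < 0)
 *		rte_panic("linking port 0 failed\n");
 */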
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct rte_eventdev *dev;
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlink, -ENOTSUP);

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			all_queues[i] = i;
		queues = all_queues;
		nb_unlinks = dev->data->nb_queues;
	}

	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= dev->data->nb_queues)
			return -EINVAL;

	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
					queues, nb_unlinks);

	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	return diag;
}
int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}
	return count;
}
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

	if (timeout_ticks == NULL)
		return -EINVAL;

	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}
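
/*
 * Illustration only, not part of the library: converting a wall-clock
 * timeout in nanoseconds to device ticks before passing it to
 * rte_event_dequeue_burst(). Buffer size and timeout value are arbitrary
 * example choices.
 *
 *	struct rte_event events[32];
 *	uint64_t ticks = 0;
 *	uint16_t nb_rx;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 10 * 1000, &ticks) < 0)
 *		ticks = 0;
 *	nb_rx = rte_event_dequeue_burst(dev_id, port_id, events,
 *					RTE_DIM(events), ticks);
 */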
int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);

	(*dev->dev_ops->dump)(dev, f);
	return 0;
}
static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
							queue_port_id,
							NULL, NULL, 0);
	return 0;
}
int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
							  queue_port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* dev_id checked above */
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
				queue_port_id, xstats_names, ids, size);

	return -ENOTSUP;
}
/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
							ids, values, n);
	return -ENOTSUP;
}
uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
				unsigned int *id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	unsigned int temp = -1;

	if (id != NULL)
		*id = (unsigned int)-1;
	else
		id = &temp; /* ensure driver never gets a NULL value */

	/* implemented by driver */
	if (dev->dev_ops->xstats_get_by_name != NULL)
		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
	return -ENOTSUP;
}
int rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_reset != NULL)
		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
							ids, nb_ids);
	return -ENOTSUP;
}
int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
				dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}
void
rte_event_dev_stop(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
				dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}
int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	return (*dev->dev_ops->dev_close)(dev);
}
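
/*
 * Illustration only, not part of the library: the expected lifecycle is
 * configure -> queue/port setup -> link -> start, and stop before close,
 * which is what the -EBUSY checks above and in rte_event_dev_configure()
 * enforce.
 *
 *	rte_event_dev_stop(dev_id);
 *	rte_event_dev_close(dev_id);
 */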
static inline int
rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_eventdev_data));

	return 0;
}
static inline uint8_t
rte_eventdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_DETACHED)
			return dev_id;
	}
	return RTE_EVENT_MAX_DEVS;
}
struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;
	uint8_t dev_id;

	if (rte_event_pmd_get_named_dev(name) != NULL) {
		RTE_EDEV_LOG_ERR("Event device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_eventdev_find_free_device_index();
	if (dev_id == RTE_EVENT_MAX_DEVS) {
		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
		return NULL;
	}

	eventdev = &rte_eventdevs[dev_id];

	if (eventdev->data == NULL) {
		struct rte_eventdev_data *eventdev_data = NULL;

		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
				socket_id);

		if (retval < 0 || eventdev_data == NULL)
			return NULL;

		eventdev->data = eventdev_data;

		snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
				"%s", name);

		eventdev->data->dev_id = dev_id;
		eventdev->data->socket_id = socket_id;
		eventdev->data->dev_started = 0;

		eventdev->attached = RTE_EVENTDEV_ATTACHED;

		eventdev_globals.nb_devs++;
	}

	return eventdev;
}
int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
				eventdev->data->dev_id);
		if (ret >= (int)sizeof(mz_name))
			return -EINVAL;

		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL)
			return -ENOMEM;

		ret = rte_memzone_free(mz);
		if (ret)
			return ret;
	}

	eventdev->data = NULL;
	return 0;
}