1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Cavium, Inc.
3 * Copyright(c) 2016-2018 Intel Corporation.
8 #ifndef _RTE_EVENTDEV_H_
9 #define _RTE_EVENTDEV_H_
14 * RTE Event Device API
16 * In a polling model, lcores poll ethdev ports and associated Rx queues
17 * directly to look for packets. In an event driven model, by contrast, lcores
18 * call the scheduler, which selects packets for them based on programmer-
19 * specified criteria. The eventdev library adds support for the event driven
20 * programming model, which offers applications automatic multicore scaling,
21 * dynamic load balancing, pipelining, packet ingress order maintenance and
22 * synchronization services to simplify application packet processing.
24 * The Event Device API is composed of two parts:
26 * - The application-oriented Event API that includes functions to set up
27 * an event device (configure it, set up its queues and ports, and start it), to
28 * establish links between queues and ports, to receive events, and so on.
30 * - The driver-oriented Event API that exports a function allowing
31 * an event Poll Mode Driver (PMD) to register itself as
32 * an event device driver.
34 * Event device components:
38 * +-------+ | | flow 0 | |
39 * |Packet | | +-------------+ |
40 * |event | | +-------------+ |
41 * | | | | flow 1 | |port_link(port0, queue0)
42 * +-------+ | +-------------+ | | +--------+
43 * +-------+ | +-------------+ o-----v-----o |dequeue +------+
44 * |Crypto | | | flow n | | | event +------->|Core 0|
45 * |work | | +-------------+ o----+ | port 0 | | |
46 * |done ev| | event queue 0 | | +--------+ +------+
47 * +-------+ +-----------------+ |
49 * |Timer | +-----------------+ | +--------+
50 * |expiry | | +-------------+ | +------o |dequeue +------+
51 * |event | | | flow 0 | o-----------o event +------->|Core 1|
52 * +-------+ | +-------------+ | +----o port 1 | | |
53 * Event enqueue | +-------------+ | | +--------+ +------+
54 * o-------------> | | flow 1 | | |
55 * enqueue( | +-------------+ | |
56 * queue_id, | | | +--------+ +------+
57 * flow_id, | +-------------+ | | | |dequeue |Core 2|
58 * sched_type, | | flow n | o-----------o event +------->| |
59 * event_type, | +-------------+ | | | port 2 | +------+
60 * subev_type, | event queue 1 | | +--------+
61 * event) +-----------------+ | +--------+
62 * | | |dequeue +------+
63 * +-------+ +-----------------+ | | event +------->|Core n|
64 * |Core | | +-------------+ o-----------o port n | | |
65 * |(SW) | | | flow 0 | | | +--------+ +--+---+
66 * |event | | +-------------+ | | |
67 * +-------+ | +-------------+ | | |
68 * ^ | | flow 1 | | | |
69 * | | +-------------+ o------+ |
70 * | | +-------------+ | |
72 * | | +-------------+ | |
73 * | | event queue n | |
74 * | +-----------------+ |
76 * +-----------------------------------------------------------+
78 * Event device: A hardware or software-based event scheduler.
80 * Event: A unit of scheduling that encapsulates a packet or other data, such
81 * as a SW-generated event from the CPU, a crypto work completion notification,
82 * a timer expiry notification, etc., as well as metadata.
83 * The metadata includes flow ID, scheduling type, event priority, event_type,
86 * Event queue: A queue containing events that are scheduled by the event dev.
87 * An event queue contains events of different flows associated with scheduling
88 * types, such as atomic, ordered, or parallel.
90 * Event port: An application's interface into the event dev for enqueue and
91 * dequeue operations. Each event port can be linked with one or more
92 * event queues for dequeue operations.
94 * By default, all the functions of the Event Device API exported by a PMD
95 * are lock-free functions that assume they are not invoked in parallel on
96 * different logical cores for the same target object. For instance,
97 * the dequeue function of a PMD must not be invoked in parallel on two logical
98 * cores to operate on the same event port. Of course, this function
99 * can be invoked in parallel by different logical cores on different ports.
100 * It is the responsibility of the upper-level application to enforce this rule.
102 * In all functions of the Event API, the Event device is
103 * designated by an integer >= 0 named the device identifier *dev_id*.
105 * At the Event driver level, Event devices are represented by a generic
106 * data structure of type *rte_event_dev*.
108 * Event devices are dynamically registered during the PCI/SoC device probing
109 * phase performed at EAL initialization time.
110 * When an Event device is being probed, a *rte_event_dev* structure and
111 * a new device identifier are allocated for that device. Then, the
112 * event_dev_init() function supplied by the Event driver matching the probed
113 * device is invoked to properly initialize the device.
115 * The role of the device init function is to reset the hardware or
116 * software event driver implementation.
118 * If the device init operation is successful, the correspondence between
119 * the device identifier assigned to the new device and its associated
120 * *rte_event_dev* structure is effectively registered.
121 * Otherwise, both the *rte_event_dev* structure and the device identifier are
124 * The functions exported by the application Event API to setup a device
125 * designated by its device identifier must be invoked in the following order:
126 * - rte_event_dev_configure()
127 * - rte_event_queue_setup()
128 * - rte_event_port_setup()
129 * - rte_event_port_link()
130 * - rte_event_dev_start()
132 * Then, the application can invoke, in any order, the functions
133 * exported by the Event API to schedule events, dequeue events, enqueue events,
134 * link or unlink event queues to/from event ports, and so on.
136 * The application may use rte_event_[queue/port]_default_conf_get() to get
137 * the default configuration of an event queue or event port, and set it up
138 * by overriding a few of the default values, as sketched below.
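 *
 * As an illustrative sketch only (assuming device id 0 with a single queue
 * and a single port, configuration values within the limits reported by
 * rte_event_dev_info_get(), and no error handling), the setup order above
 * could look like:
 *
 *     struct rte_event_dev_config dev_conf = {
 *             .nb_events_limit = -1,        // assumes an *open system* device
 *             .nb_event_queues = 1,
 *             .nb_event_ports = 1,
 *             .nb_event_queue_flows = 1024,
 *             .nb_event_port_dequeue_depth = 16,
 *             .nb_event_port_enqueue_depth = 16,
 *     };
 *
 *     rte_event_dev_configure(0, &dev_conf);
 *     rte_event_queue_setup(0, 0, NULL);        // NULL: default queue config
 *     rte_event_port_setup(0, 0, NULL);         // NULL: default port config
 *     rte_event_port_link(0, 0, NULL, NULL, 0); // NULL: link all queues
 *     rte_event_dev_start(0);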
140 * If the application wants to change the configuration (i.e. call
141 * rte_event_dev_configure(), rte_event_queue_setup(), or
142 * rte_event_port_setup()), it must call rte_event_dev_stop() first to stop the
143 * device and then do the reconfiguration before calling rte_event_dev_start()
144 * again. The schedule, enqueue and dequeue functions should not be invoked
145 * when the device is stopped.
147 * Finally, an application can close an Event device by invoking the
148 * rte_event_dev_close() function.
150 * Each function of the application Event API invokes a specific function
151 * of the PMD that controls the target device designated by its device
154 * For this purpose, all device-specific functions of an Event driver are
155 * supplied through a set of pointers contained in a generic structure of type
157 * The address of the *event_dev_ops* structure is stored in the *rte_event_dev*
158 * structure by the device init function of the Event driver, which is
159 * invoked during the PCI/SoC device probing phase, as explained earlier.
161 * In other words, each function of the Event API simply retrieves the
162 * *rte_event_dev* structure associated with the device identifier and
163 * performs an indirect invocation of the corresponding driver function
164 * supplied in the *event_dev_ops* structure of the *rte_event_dev* structure.
166 * For performance reasons, the address of the fast-path functions of the
167 * Event driver is not contained in the *event_dev_ops* structure.
168 * Instead, they are directly stored at the beginning of the *rte_event_dev*
169 * structure to avoid an extra indirect memory access during their invocation.
171 * RTE event device drivers do not use interrupts for enqueue or dequeue
172 * operation. Instead, Event drivers export Poll-Mode enqueue and dequeue
173 * functions to applications.
175 * Events are injected into the event device through the *enqueue* operation
176 * by event producers in the system. Typical event producers are the ethdev
177 * subsystem for generating packet events, the CPU (SW) for generating events
178 * based on different stages of application processing, and the cryptodev for
179 * generating crypto work completion notifications.
181 * The *dequeue* operation gets one or more events from the event ports.
182 * The application processes the events and, at an intermediate stage of event
183 * processing, sends them to the downstream event queue through
184 * rte_event_enqueue_burst(). At the final stage, the application may use the
185 * Tx adapter API to maintain ingress order and then send the packet/event on the wire.
187 * The point at which events are scheduled to ports depends on the device.
188 * For hardware devices, scheduling occurs asynchronously without any software
189 * intervention. Software schedulers can either be distributed
190 * (each worker thread schedules events to its own port) or centralized
191 * (a dedicated thread schedules to all ports). Distributed software schedulers
192 * perform the scheduling in rte_event_dequeue_burst(), whereas centralized
193 * schedulers need a dedicated service core for scheduling.
194 * If the RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag is not set, the
195 * device is centralized and thus needs a dedicated scheduling
196 * thread that repeatedly calls a software-specific scheduling function.
198 * An event driven worker thread has the following typical workflow on the fastpath:
201 * rte_event_dequeue_burst(...);
203 * rte_event_enqueue_burst(...);
213 #include <rte_common.h>
214 #include <rte_config.h>
215 #include <rte_memory.h>
216 #include <rte_errno.h>
218 struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
221 /* Event device capability bitmap flags */
222 #define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)
223 /**< Event scheduling prioritization is based on the priority associated with
226 * @see rte_event_queue_setup()
228 #define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)
229 /**< Event scheduling prioritization is based on the priority associated with
230 * each event. Priority of each event is supplied in *rte_event* structure
231 * on each enqueue operation.
233 * @see rte_event_enqueue_burst()
235 #define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)
236 /**< Event device operates in distributed scheduling mode.
237 * In distributed scheduling mode, event scheduling happens in HW, in
238 * rte_event_dequeue_burst(), or in a combination of the two.
239 * If the flag is not set then the eventdev is centralized and thus needs a
240 * dedicated service core that acts as a scheduling thread.
242 * @see rte_event_dequeue_burst()
244 #define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
245 /**< Event device is capable of enqueuing events of any type to any queue.
246 * If this capability is not set, the queue only supports events of the
247 * *RTE_SCHED_TYPE_* type that it was created with.
249 * @see RTE_SCHED_TYPE_* values
251 #define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)
252 /**< Event device is capable of operating in burst mode for enqueue (forward,
253 * release) and dequeue operations. If this capability is not set, the
254 * application still uses rte_event_dequeue_burst() and rte_event_enqueue_burst()
255 * but the PMD accepts only one event at a time.
257 * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
259 #define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE (1ULL << 5)
260 /**< Event device ports support disabling the implicit release feature, in
261 * which the port will release all unreleased events in its dequeue operation.
262 * If this capability is set and the port is configured with implicit release
263 * disabled, the application is responsible for explicitly releasing events
264 * using either the RTE_EVENT_OP_FORWARD or the RTE_EVENT_OP_RELEASE event
265 * enqueue operations.
267 * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
270 #define RTE_EVENT_DEV_CAP_NONSEQ_MODE (1ULL << 6)
271 /**< Event device is capable of operating in non-sequential mode. The path
272 * of an event need not be sequential; the application can change
273 * the path of an event at runtime. If the flag is not set, each event
274 * will follow a path from queue 0 to queue 1 to queue 2, etc. If the flag is
275 * set, events may be sent to queues in any order. If the flag is not set, the
276 * eventdev will return an error when the application enqueues an event for a
277 * qid which is not the next in the sequence.
280 #define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK (1ULL << 7)
281 /**< Event device is capable of configuring the queue/port link at runtime.
282 * If the flag is not set, the eventdev queue/port link can only be
283 * configured during initialization.
286 #define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
287 /**< Event device is capable of setting up links between multiple queues
288 * and a single port. If the flag is not set, the eventdev can only map a
289 * single queue to each port, or map a single queue to many ports.
292 /* Event device priority levels */
293 #define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
294 /**< Highest priority expressed across eventdev subsystem
295 * @see rte_event_queue_setup(), rte_event_enqueue_burst()
296 * @see rte_event_port_link()
298 #define RTE_EVENT_DEV_PRIORITY_NORMAL 128
299 /**< Normal priority expressed across eventdev subsystem
300 * @see rte_event_queue_setup(), rte_event_enqueue_burst()
301 * @see rte_event_port_link()
303 #define RTE_EVENT_DEV_PRIORITY_LOWEST 255
304 /**< Lowest priority expressed across eventdev subsystem
305 * @see rte_event_queue_setup(), rte_event_enqueue_burst()
306 * @see rte_event_port_link()
310 * Get the total number of event devices that have been successfully
314 * The total number of usable event devices.
317 rte_event_dev_count(void);
320 * Get the device identifier for the named event device.
323 * Event device name to select the event device identifier.
326 * Returns event device identifier on success.
327 * - <0: Failure to find named event device.
330 rte_event_dev_get_dev_id(const char *name);
333 * Return the NUMA socket to which a device is connected.
336 * The identifier of the device.
338 * The NUMA socket id to which the device is connected or
339 * a default of zero if the socket could not be determined.
340 * - -EINVAL: dev_id value is out of range.
343 rte_event_dev_socket_id(uint8_t dev_id);
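/*
 * Illustrative sketch: enumerating the event devices probed by EAL and
 * reporting the NUMA socket each one is connected to (printf() from
 * <stdio.h> is used only for brevity). rte_event_dev_get_dev_id() can
 * likewise be used when only the device name is known.
 *
 *     uint8_t i, count = rte_event_dev_count();
 *
 *     for (i = 0; i < count; i++)
 *             printf("eventdev %u is on socket %d\n", i,
 *                    rte_event_dev_socket_id(i));
 */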
346 * Event device information
348 struct rte_event_dev_info {
349 const char *driver_name; /**< Event driver name */
350 struct rte_device *dev; /**< Device information */
351 uint32_t min_dequeue_timeout_ns;
352 /**< Minimum supported global dequeue timeout(ns) by this device */
353 uint32_t max_dequeue_timeout_ns;
354 /**< Maximum supported global dequeue timeout(ns) by this device */
355 uint32_t dequeue_timeout_ns;
356 /**< Configured global dequeue timeout(ns) for this device */
357 uint8_t max_event_queues;
358 /**< Maximum event_queues supported by this device */
359 uint32_t max_event_queue_flows;
360 /**< Maximum supported flows in an event queue by this device*/
361 uint8_t max_event_queue_priority_levels;
362 /**< Maximum number of event queue priority levels by this device.
363 * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability
365 uint8_t max_event_priority_levels;
366 /**< Maximum number of event priority levels by this device.
367 * Valid when the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability
369 uint8_t max_event_ports;
370 /**< Maximum number of event ports supported by this device */
371 uint8_t max_event_port_dequeue_depth;
372 /**< Maximum number of events that can be dequeued at a time from an
373 * event port by this device.
374 * A device that does not support bulk dequeue will set this to 1.
376 uint32_t max_event_port_enqueue_depth;
377 /**< Maximum number of events that can be enqueued at a time from an
378 * event port by this device.
379 * A device that does not support bulk enqueue will set this to 1.
381 int32_t max_num_events;
382 /**< A *closed system* event dev has a limit on the number of events it
383 * can manage at a time. An *open system* event dev does not have a
384 * limit and will specify this as -1.
386 uint32_t event_dev_cap;
387 /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
391 * Retrieve the contextual information of an event device.
394 * The identifier of the device.
396 * @param[out] dev_info
397 * A pointer to a structure of type *rte_event_dev_info* to be filled with the
398 * contextual information of the device.
401 * - 0: Success, driver updates the contextual information of the event device
402 * - <0: Error code returned by the driver info get function.
406 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
409 * The count of ports.
411 #define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
413 * The count of queues.
415 #define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
417 * The status of the device, zero for stopped, non-zero for started.
419 #define RTE_EVENT_DEV_ATTR_STARTED 2
422 * Get an attribute from a device.
424 * @param dev_id Eventdev id
425 * @param attr_id The attribute ID to retrieve
426 * @param[out] attr_value A pointer that will be filled in with the attribute
427 * value if successful.
430 * - 0: Successfully retrieved attribute value
431 * - -EINVAL: Invalid device or *attr_id* provided, or *attr_value* is NULL
434 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
435 uint32_t *attr_value);
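/*
 * Illustrative sketch (device id 0 assumed, error handling abbreviated):
 * querying static limits with rte_event_dev_info_get() and a runtime
 * attribute with rte_event_dev_attr_get().
 *
 *     struct rte_event_dev_info info;
 *     uint32_t nb_ports;
 *
 *     if (rte_event_dev_info_get(0, &info) == 0)
 *             printf("driver %s: max queues %u, max ports %u\n",
 *                    info.driver_name, info.max_event_queues,
 *                    info.max_event_ports);
 *
 *     if (rte_event_dev_attr_get(0, RTE_EVENT_DEV_ATTR_PORT_COUNT,
 *                                &nb_ports) == 0)
 *             printf("configured ports: %u\n", nb_ports);
 */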
438 /* Event device configuration bitmap flags */
439 #define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
440 /**< Override the global *dequeue_timeout_ns* and use per dequeue timeout in ns.
441 * @see rte_event_dequeue_timeout_ticks(), rte_event_dequeue_burst()
444 /** Event device configuration structure */
445 struct rte_event_dev_config {
446 uint32_t dequeue_timeout_ns;
447 /**< rte_event_dequeue_burst() timeout on this device.
448 * This value should be in the range of *min_dequeue_timeout_ns* and
449 * *max_dequeue_timeout_ns*, which were previously provided in
450 * rte_event_dev_info_get().
451 * The value 0 is allowed, in which case the default dequeue timeout is used.
452 * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
454 int32_t nb_events_limit;
455 /**< In a *closed system* this field is the limit on maximum number of
456 * events that can be inflight in the eventdev at a given time. The
457 * limit is required to ensure that the finite space in a closed system
458 * is not overwhelmed. The value cannot exceed the *max_num_events*
459 * as provided by rte_event_dev_info_get().
460 * This value should be set to -1 for *open system*.
462 uint8_t nb_event_queues;
463 /**< Number of event queues to configure on this device.
464 * This value cannot exceed the *max_event_queues* which was previously
465 * provided in rte_event_dev_info_get().
467 uint8_t nb_event_ports;
468 /**< Number of event ports to configure on this device.
469 * This value cannot exceed the *max_event_ports* which was previously
470 * provided in rte_event_dev_info_get().
472 uint32_t nb_event_queue_flows;
473 /**< Number of flows for any event queue on this device.
474 * This value cannot exceed the *max_event_queue_flows* which was previously
475 * provided in rte_event_dev_info_get().
477 uint32_t nb_event_port_dequeue_depth;
478 /**< Maximum number of events that can be dequeued at a time from an
479 * event port by this device.
480 * This value cannot exceed the *max_event_port_dequeue_depth*
481 * which was previously provided in rte_event_dev_info_get().
482 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
483 * @see rte_event_port_setup()
485 uint32_t nb_event_port_enqueue_depth;
486 /**< Maximum number of events that can be enqueued at a time from an
487 * event port by this device.
488 * This value cannot exceed the *max_event_port_enqueue_depth*
489 * which was previously provided in rte_event_dev_info_get().
490 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
491 * @see rte_event_port_setup()
493 uint32_t event_dev_cfg;
494 /**< Event device config flags(RTE_EVENT_DEV_CFG_)*/
498 * Configure an event device.
500 * This function must be invoked before any other function in the
501 * API. It can also be re-invoked when a device is in the
504 * The caller may use rte_event_dev_info_get() to get the capabilities and
505 * resources available for this event device.
508 * The identifier of the device to configure.
510 * The event device configuration structure.
513 * - 0: Success, device configured.
514 * - <0: Error code returned by the driver configuration function.
517 rte_event_dev_configure(uint8_t dev_id,
518 const struct rte_event_dev_config *dev_conf);
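/*
 * Illustrative sketch (device id 0 assumed, return values unchecked): deriving
 * a configuration from the limits reported by rte_event_dev_info_get() and
 * applying it. A real application would size these fields to its own needs,
 * staying within the reported maxima.
 *
 *     struct rte_event_dev_info info;
 *     struct rte_event_dev_config config = {0};
 *
 *     rte_event_dev_info_get(0, &info);
 *     config.nb_event_queues = 2;     // assumes info.max_event_queues >= 2
 *     config.nb_event_ports = 4;      // assumes info.max_event_ports >= 4
 *     config.nb_events_limit = info.max_num_events;
 *     config.nb_event_queue_flows = info.max_event_queue_flows;
 *     config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *     config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *     config.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *     rte_event_dev_configure(0, &config);
 */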
521 /* Event queue specific APIs */
523 /* Event queue configuration bitmap flags */
524 #define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)
525 /**< Allow ATOMIC, ORDERED, PARALLEL schedule type enqueue
527 * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
528 * @see rte_event_enqueue_burst()
530 #define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)
531 /**< This event queue links only to a single event port.
533 * @see rte_event_port_setup(), rte_event_port_link()
536 /** Event queue configuration structure */
537 struct rte_event_queue_conf {
538 uint32_t nb_atomic_flows;
539 /**< The maximum number of active flows this queue can track at any
540 * given time. If the queue is configured for atomic scheduling (by
541 * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg
542 * or RTE_SCHED_TYPE_ATOMIC flag to schedule_type), then the
543 * value must be in the range of [1, nb_event_queue_flows], which was
544 * previously provided in rte_event_dev_configure().
546 uint32_t nb_atomic_order_sequences;
547 /**< The maximum number of outstanding events waiting to be
548 * reordered by this queue. In other words, the number of entries in
549 * this queue's reorder buffer. When the number of events in the
550 * reorder buffer reaches *nb_atomic_order_sequences*, the
551 * scheduler cannot schedule further events from this queue and an invalid
552 * event will be returned from dequeue until one or more entries are
554 * If the queue is configured for ordered scheduling (by applying the
555 * RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg or
556 * RTE_SCHED_TYPE_ORDERED flag to schedule_type), then the value must
557 * be in the range of [1, nb_event_queue_flows], which was
558 * previously supplied to rte_event_dev_configure().
560 uint32_t event_queue_cfg;
561 /**< Queue cfg flags(EVENT_QUEUE_CFG_) */
562 uint8_t schedule_type;
563 /**< Queue schedule type(RTE_SCHED_TYPE_*).
564 * Valid when RTE_EVENT_QUEUE_CFG_ALL_TYPES bit is not set in
568 /**< Priority for this event queue relative to other event queues.
569 * The requested priority should be in the range of
570 * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
571 * The implementation shall normalize the requested priority to
572 * event device supported priority value.
573 * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability
578 * Retrieve the default configuration information of an event queue designated
579 * by its *queue_id* from the event driver for an event device.
581 * This function is intended to be used in conjunction with rte_event_queue_setup()
582 * where the caller needs to set up the queue by overriding a few default values.
585 * The identifier of the device.
587 * The index of the event queue to get the configuration information.
588 * The value must be in the range [0, nb_event_queues - 1]
589 * previously supplied to rte_event_dev_configure().
590 * @param[out] queue_conf
591 * The pointer to the default event queue configuration data.
593 * - 0: Success, driver updates the default event queue configuration data.
594 * - <0: Error code returned by the driver info get function.
596 * @see rte_event_queue_setup()
600 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
601 struct rte_event_queue_conf *queue_conf);
604 * Allocate and set up an event queue for an event device.
607 * The identifier of the device.
609 * The index of the event queue to setup. The value must be in the range
610 * [0, nb_event_queues - 1] previously supplied to rte_event_dev_configure().
612 * The pointer to the configuration data to be used for the event queue.
613 * A NULL value is allowed, in which case the default configuration is used.
615 * @see rte_event_queue_default_conf_get()
618 * - 0: Success, event queue correctly set up.
619 * - <0: event queue configuration failed
622 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
623 const struct rte_event_queue_conf *queue_conf);
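/*
 * Illustrative sketch (device id 0, queue id 0 assumed, return values
 * unchecked): fetching the default queue configuration, overriding a few
 * fields and setting up an atomic, single-link queue.
 *
 *     struct rte_event_queue_conf queue_conf;
 *
 *     rte_event_queue_default_conf_get(0, 0, &queue_conf);
 *     queue_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *     queue_conf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *     queue_conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
 *     rte_event_queue_setup(0, 0, &queue_conf);
 */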
626 * The priority of the queue.
628 #define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
630 * The number of atomic flows configured for the queue.
632 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
634 * The number of atomic order sequences configured for the queue.
636 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
638 * The cfg flags for the queue.
640 #define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
642 * The schedule type of the queue.
644 #define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
647 * Get an attribute from a queue.
654 * The attribute ID to retrieve
655 * @param[out] attr_value
656 * A pointer that will be filled in with the attribute value if successful
659 * - 0: Successfully returned value
660 * - -EINVAL: invalid device, queue or attr_id provided, or attr_value was
662 * - -EOVERFLOW: returned when attr_id is set to
663 * RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE and event_queue_cfg is set to
664 * RTE_EVENT_QUEUE_CFG_ALL_TYPES
667 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
668 uint32_t *attr_value);
670 /* Event port specific APIs */
672 /** Event port configuration structure */
673 struct rte_event_port_conf {
674 int32_t new_event_threshold;
675 /**< A backpressure threshold for new event enqueues on this port.
676 * Use for *closed system* event dev where event capacity is limited,
677 * and cannot exceed the capacity of the event dev.
678 * Configuring ports with different thresholds can make higher priority
679 * traffic less likely to be backpressured.
680 * For example, a port used to inject NIC Rx packets into the event dev
681 * can have a lower threshold so as not to overwhelm the device,
682 * while ports used for worker pools can have a higher threshold.
683 * This value cannot exceed the *nb_events_limit*
684 * which was previously supplied to rte_event_dev_configure().
685 * This should be set to '-1' for *open system*.
687 uint16_t dequeue_depth;
688 /**< Configure number of bulk dequeues for this event port.
689 * This value cannot exceed the *nb_event_port_dequeue_depth*
690 * which was previously supplied to rte_event_dev_configure().
691 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
693 uint16_t enqueue_depth;
694 /**< Configure number of bulk enqueues for this event port.
695 * This value cannot exceed the *nb_event_port_enqueue_depth*
696 * which was previously supplied to rte_event_dev_configure().
697 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
699 uint8_t disable_implicit_release;
700 /**< Configure the port not to release outstanding events in
701 * rte_event_dequeue_burst(). If true, all events received through
702 * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
703 * RTE_EVENT_OP_FORWARD. Must be false when the device is not
704 * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
709 * Retrieve the default configuration information of an event port designated
710 * by its *port_id* from the event driver for an event device.
712 * This function is intended to be used in conjunction with rte_event_port_setup()
713 * where the caller needs to set up the port by overriding a few default values.
716 * The identifier of the device.
718 * The index of the event port to get the configuration information.
719 * The value must be in the range [0, nb_event_ports - 1]
720 * previously supplied to rte_event_dev_configure().
721 * @param[out] port_conf
722 * The pointer to the default event port configuration data
724 * - 0: Success, driver updates the default event port configuration data.
725 * - <0: Error code returned by the driver info get function.
727 * @see rte_event_port_setup()
731 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
732 struct rte_event_port_conf *port_conf);
735 * Allocate and set up an event port for an event device.
738 * The identifier of the device.
740 * The index of the event port to setup. The value must be in the range
741 * [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
743 * The pointer to the configuration data to be used for the event port.
744 * A NULL value is allowed, in which case the default configuration is used.
746 * @see rte_event_port_default_conf_get()
749 * - 0: Success, event port correctly set up.
750 * - <0: Port configuration failed
751 * - (-EDQUOT) Quota exceeded (application tried to link a queue configured
752 * with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
755 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
756 const struct rte_event_port_conf *port_conf);
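/*
 * Illustrative sketch (device id 0, port id 0 assumed, return values
 * unchecked): fetching the default port configuration, lowering the
 * new-event threshold for a producer port and setting the port up.
 *
 *     struct rte_event_port_conf port_conf;
 *
 *     rte_event_port_default_conf_get(0, 0, &port_conf);
 *     port_conf.new_event_threshold = 1024; // assumed <= nb_events_limit
 *     rte_event_port_setup(0, 0, &port_conf);
 */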
759 * The queue depth of the port on the enqueue side
761 #define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
763 * The queue depth of the port on the dequeue side
765 #define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
767 * The new event threshold of the port
769 #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
772 * Get an attribute from a port.
779 * The attribute ID to retrieve
780 * @param[out] attr_value
781 * A pointer that will be filled in with the attribute value if successful
784 * - 0: Successfully returned value
785 * - (-EINVAL) Invalid device, port or attr_id, or attr_value was NULL
788 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
789 uint32_t *attr_value);
792 * Start an event device.
794 * The device start step is the last one; it consists of setting the event
795 * queues to start accepting events and scheduling them to event ports.
797 * On success, all basic functions exported by the API (event enqueue,
798 * event dequeue and so on) can be invoked.
801 * Event device identifier
803 * - 0: Success, device started.
804 * - -ESTALE : Not all ports of the device are configured
805 * - -ENOLINK: Not all queues are linked, which could lead to deadlock.
808 rte_event_dev_start(uint8_t dev_id);
811 * Stop an event device.
813 * This function causes all queued events to be drained, including those
814 * residing in event ports. While draining events out of the device, this
815 * function calls the user-provided flush callback (if one was registered) once
818 * The device can be restarted with a call to rte_event_dev_start(). Threads
819 * that continue to enqueue/dequeue while the device is stopped, or being
820 * stopped, will result in undefined behavior. This includes event adapters,
821 * which must be stopped prior to stopping the eventdev.
824 * Event device identifier.
826 * @see rte_event_dev_stop_flush_callback_register()
829 rte_event_dev_stop(uint8_t dev_id);
831 typedef void (*eventdev_stop_flush_t)(uint8_t dev_id, struct rte_event event,
833 /**< Callback function called during rte_event_dev_stop(), invoked once per
838 * Registers a callback function to be invoked during rte_event_dev_stop() for
839 * each flushed event. This function can be used to properly dispose of queued
840 * events, for example events containing memory pointers.
842 * The callback function is only registered for the calling process. The
843 * callback function must be registered in every process that can call
844 * rte_event_dev_stop().
846 * To unregister a callback, call this function with a NULL callback pointer.
849 * The identifier of the device.
851 * Callback function invoked once per flushed event.
853 * Argument supplied to callback.
857 * - -EINVAL if *dev_id* is invalid
859 * @see rte_event_dev_stop()
862 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
863 eventdev_stop_flush_t callback, void *userdata);
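/*
 * Illustrative sketch (device id 0 assumed): a flush callback that frees the
 * mbuf carried by each ethdev event drained during rte_event_dev_stop().
 * rte_pktmbuf_free() comes from rte_mbuf.h, which this header intentionally
 * does not include; the check on event_type should be adapted to the
 * application's actual event contents.
 *
 *     static void
 *     flush_cb(uint8_t dev_id, struct rte_event event, void *arg)
 *     {
 *             RTE_SET_USED(dev_id);
 *             RTE_SET_USED(arg);
 *             if (event.event_type == RTE_EVENT_TYPE_ETHDEV)
 *                     rte_pktmbuf_free(event.mbuf);
 *     }
 *
 *     rte_event_dev_stop_flush_callback_register(0, flush_cb, NULL);
 *     rte_event_dev_stop(0);
 */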
866 * Close an event device. The device cannot be restarted!
869 * Event device identifier
872 * - 0 on successfully closing device
873 * - <0 on failure to close device
874 * - (-EAGAIN) if device is busy
877 rte_event_dev_close(uint8_t dev_id);
879 /* Scheduler type definitions */
880 #define RTE_SCHED_TYPE_ORDERED 0
881 /**< Ordered scheduling
883 * Events from an ordered flow of an event queue can be scheduled to multiple
884 * ports for concurrent processing while maintaining the original event order.
885 * This scheme enables the user to achieve high single flow throughput by
886 * avoiding SW synchronization for ordering between ports which are bound to cores.
888 * The source flow ordering from an event queue is maintained when events are
889 * enqueued to their destination queue within the same ordered flow context.
890 * An event port holds the context until the application calls
891 * rte_event_dequeue_burst() from the same port, which implicitly releases
893 * The user may allow the scheduler to release the context earlier than that
894 * by invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE operation.
896 * Events from the source queue appear in their original order when dequeued
897 * from a destination queue.
898 * Event ordering is based on the received event(s), but also other
899 * (newly allocated or stored) events are ordered when enqueued within the same
900 * ordered context. Events not enqueued (e.g. released or stored) within the
901 * context are considered missing from reordering and are skipped at this time
902 * (but can be ordered again within another context).
904 * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
907 #define RTE_SCHED_TYPE_ATOMIC 1
908 /**< Atomic scheduling
910 * Events from an atomic flow of an event queue can be scheduled only to a
911 * single port at a time. The port is guaranteed to have exclusive (atomic)
912 * access to the associated flow context, which enables the user to avoid SW
913 * synchronization. Atomic flows also help to maintain event ordering
914 * since only one port at a time can process events from a flow of an
917 * The atomic queue synchronization context is dedicated to the port until
918 * the application calls rte_event_dequeue_burst() from the same port,
919 * which implicitly releases the context. The user may allow the scheduler to
920 * release the context earlier than that by invoking rte_event_enqueue_burst()
921 * with RTE_EVENT_OP_RELEASE operation.
923 * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
926 #define RTE_SCHED_TYPE_PARALLEL 2
927 /**< Parallel scheduling
929 * The scheduler performs priority scheduling, load balancing, etc., but
930 * does not provide additional event synchronization or ordering.
931 * It is free to schedule events from a single parallel flow of an event queue
932 * to multiple event ports for concurrent processing.
933 * The application is responsible for flow context synchronization and
934 * event ordering (SW synchronization).
936 * @see rte_event_queue_setup(), rte_event_dequeue_burst()
939 /* Event types to classify the event source */
940 #define RTE_EVENT_TYPE_ETHDEV 0x0
941 /**< The event generated from ethdev subsystem */
942 #define RTE_EVENT_TYPE_CRYPTODEV 0x1
943 /**< The event generated from cryptodev subsystem */
944 #define RTE_EVENT_TYPE_TIMER 0x2
945 /**< The event generated from event timer adapter */
946 #define RTE_EVENT_TYPE_CPU 0x3
947 /**< The event generated from the CPU for pipelining.
948 * The application may use *sub_event_type* to further classify the event
950 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
951 /**< The event generated from event eth Rx adapter */
952 #define RTE_EVENT_TYPE_MAX 0x10
953 /**< Maximum number of event types */
955 /* Event enqueue operations */
956 #define RTE_EVENT_OP_NEW 0
957 /**< The event producers use this operation to inject a new event to the
960 #define RTE_EVENT_OP_FORWARD 1
961 /**< The CPU uses this operation to forward the event to a different event queue
962 * or change to a new application-specific flow or schedule type to enable
965 * This operation must only be enqueued to the same port that the
966 * event to be forwarded was dequeued from.
968 #define RTE_EVENT_OP_RELEASE 2
969 /**< Release the flow context associated with the schedule type.
971 * If the current flow's scheduling type is *RTE_SCHED_TYPE_ATOMIC*,
972 * then this operation hints to the scheduler that the user has completed
973 * critical section processing in the current atomic context.
974 * The scheduler is now allowed to schedule events from the same flow from
975 * an event queue to another port. However, the context may still be held
976 * until the next rte_event_dequeue_burst() call; this call allows but does not
977 * force the scheduler to release the context early.
979 * Early atomic context release may increase parallelism and thus system
980 * performance, but the user needs to design carefully the split into critical
981 * vs non-critical sections.
983 * If the current flow's scheduling type is *RTE_SCHED_TYPE_ORDERED*,
984 * then this operation hints to the scheduler that the user has done all that is
985 * needed to maintain event order in the current ordered context.
986 * The scheduler is allowed to release the ordered context of this port and
987 * avoid reordering any following enqueues.
989 * Early ordered context release may increase parallelism and thus system
992 * If the current flow's scheduling type is *RTE_SCHED_TYPE_PARALLEL*
993 * or no scheduling context is held then this operation may be a NOOP,
994 * depending on the implementation.
996 * This operation must only be enqueued to the same port that the
997 * event to be released was dequeued from.
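 *
 * As an illustrative sketch only: a worker that has finished its critical
 * section early (before its next dequeue) may release the context it holds
 * by enqueuing a RELEASE operation; dev_id and port_id are placeholders.
 *
 *     struct rte_event ev = {0};
 *
 *     ev.op = RTE_EVENT_OP_RELEASE;
 *     rte_event_enqueue_burst(dev_id, port_id, &ev, 1);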
1002 * The generic *rte_event* structure to hold the event attributes
1003 * for dequeue and enqueue operation
1010 /** Event attributes for dequeue or enqueue operation */
1012 uint32_t flow_id:20;
1013 /**< Targeted flow identifier for the enqueue and
1014 * dequeue operation.
1015 * The value must be in the range of
1016 * [0, nb_event_queue_flows - 1] which
1017 * was previously supplied to rte_event_dev_configure().
1019 uint32_t sub_event_type:8;
1020 /**< Sub-event types based on the event source.
1021 * @see RTE_EVENT_TYPE_CPU
1023 uint32_t event_type:4;
1024 /**< Event type to classify the event source.
1025 * @see RTE_EVENT_TYPE_ETHDEV, (RTE_EVENT_TYPE_*)
1028 /**< The type of event enqueue operation - new/forward/
1029 * etc. This field is not preserved across an instance
1030 * and is undefined on dequeue.
1031 * @see RTE_EVENT_OP_NEW, (RTE_EVENT_OP_*)
1034 /**< Reserved for future use */
1035 uint8_t sched_type:2;
1036 /**< Scheduler synchronization type (RTE_SCHED_TYPE_*)
1037 * associated with flow id on a given event queue
1038 * for the enqueue and dequeue operation.
1041 /**< Targeted event queue identifier for the enqueue or
1042 * dequeue operation.
1043 * The value must be in the range of
1044 * [0, nb_event_queues - 1] which was previously supplied to
1045 * rte_event_dev_configure().
1048 /**< Event priority relative to other events in the
1049 * event queue. The requested priority should be in the
1050 * range of [RTE_EVENT_DEV_PRIORITY_HIGHEST,
1051 * RTE_EVENT_DEV_PRIORITY_LOWEST].
1052 * The implementation shall normalize the requested
1053 * priority to supported priority value.
1054 * Valid when the device has
1055 * RTE_EVENT_DEV_CAP_EVENT_QOS capability.
1057 uint8_t impl_opaque;
1058 /**< Implementation specific opaque value.
1059 * An implementation may use this field to hold
1060 * implementation specific value to share between
1061 * dequeue and enqueue operation.
1062 * The application should not modify this field.
1069 /**< Opaque 64-bit value */
1071 /**< Opaque event pointer */
1072 struct rte_mbuf *mbuf;
1073 /**< mbuf pointer if dequeued event is associated with mbuf */
1077 /* Ethdev Rx adapter capability bitmap flags */
1078 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1
1079 /**< This flag is set when the packet transfer mechanism is in HW.
1080 * Ethdev can send packets to the event device using internal event port.
1082 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2
1083 /**< Adapter supports multiple event queues per ethdev. Every ethdev
1084 * Rx queue can be connected to a unique event queue.
1086 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4
1087 /**< The application can override the adapter-generated flow ID in the
1088 * event. This flow ID can be specified when adding an ethdev Rx queue
1089 * to the adapter using the ev member of struct rte_event_eth_rx_adapter
1090 * @see struct rte_event_eth_rx_adapter_queue_conf::ev
1091 * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
1095 * Retrieve the event device's ethdev Rx adapter capabilities for the
1096 * specified ethernet port
1099 * The identifier of the device.
1101 * @param eth_port_id
1102 * The identifier of the ethernet device.
1105 * A pointer to memory filled with Rx event adapter capabilities.
1108 * - 0: Success, driver provides Rx event adapter capabilities for the
1110 * - <0: Error code returned by the driver function.
1114 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1117 #define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
1118 /**< This flag is set when the timer mechanism is in HW. */
1121 * Retrieve the event device's timer adapter capabilities.
1124 * The identifier of the device.
1127 * A pointer to memory to be filled with event timer adapter capabilities.
1130 * - 0: Success, driver provided event timer adapter capabilities.
1131 * - <0: Error code returned by the driver function.
1134 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
1136 /* Crypto adapter capability bitmap flag */
1137 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
1138 /**< Flag indicates HW is capable of generating events in
1139 * RTE_EVENT_OP_NEW enqueue operation. Cryptodev will send
1140 * packets to the event device as new events using an internal
1144 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
1145 /**< Flag indicates HW is capable of generating events in
1146 * RTE_EVENT_OP_FORWARD enqueue operation. Cryptodev will send
1147 * packets to the event device as forwarded event using an
1148 * internal event port.
1151 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4
1152 /**< Flag indicates HW is capable of mapping crypto queue pair to
1156 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8
1157 /**< Flag indicates HW/SW supports a mechanism to store and retrieve
1158 * the private data information along with the crypto session.
1162 * Retrieve the event device's crypto adapter capabilities for the
1163 * specified cryptodev device
1166 * The identifier of the device.
1169 * The identifier of the cryptodev device.
1172 * A pointer to memory filled with event adapter capabilities.
1173 * It is expected to be pre-allocated & initialized by caller.
1176 * - 0: Success, driver provides event adapter capabilities for the
1178 * - <0: Error code returned by the driver function.
1182 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
1185 /* Ethdev Tx adapter capability bitmap flags */
1186 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1
1187 /**< This flag is set when the PMD supports a packet transmit callback
1191 * Retrieve the event device's eth Tx adapter capabilities
1194 * The identifier of the device.
1196 * @param eth_port_id
1197 * The identifier of the ethernet device.
1200 * A pointer to memory filled with eth Tx adapter capabilities.
1203 * - 0: Success, driver provides eth Tx adapter capabilities.
1204 * - <0: Error code returned by the driver function.
1208 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1211 struct rte_eventdev_ops;
1212 struct rte_eventdev;
1214 typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
1215 /**< @internal Enqueue event on port of a device */
1217 typedef uint16_t (*event_enqueue_burst_t)(void *port,
1218 const struct rte_event ev[], uint16_t nb_events);
1219 /**< @internal Enqueue burst of events on port of a device */
1221 typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
1222 uint64_t timeout_ticks);
1223 /**< @internal Dequeue event from port of a device */
1225 typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
1226 uint16_t nb_events, uint64_t timeout_ticks);
1227 /**< @internal Dequeue burst of events from port of a device */
1229 typedef uint16_t (*event_tx_adapter_enqueue)(void *port,
1230 struct rte_event ev[], uint16_t nb_events);
1231 /**< @internal Enqueue burst of events on port of a device */
1233 #define RTE_EVENTDEV_NAME_MAX_LEN (64)
1234 /**< @internal Max length of name of event PMD */
1238 * The data part, with no function pointers, associated with each device.
1240 * This structure is safe to place in shared memory to be common among
1241 * different processes in a multi-process configuration.
1243 struct rte_eventdev_data {
1245 /**< Socket ID where memory is allocated */
1247 /**< Device ID for this instance */
1249 /**< Number of event queues. */
1251 /**< Number of event ports. */
1253 /**< Array of pointers to ports. */
1254 struct rte_event_port_conf *ports_cfg;
1255 /**< Array of port configuration structures. */
1256 struct rte_event_queue_conf *queues_cfg;
1257 /**< Array of queue configuration structures. */
1258 uint16_t *links_map;
1259 /**< Memory to store queues to port connections. */
1261 /**< PMD-specific private data */
1262 uint32_t event_dev_cap;
1263 /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
1264 struct rte_event_dev_config dev_conf;
1265 /**< Configuration applied to device. */
1266 uint8_t service_inited;
1267 /* Service initialization state */
1268 uint32_t service_id;
1270 void *dev_stop_flush_arg;
1271 /**< User-provided argument for event flush function */
1274 uint8_t dev_started : 1;
1275 /**< Device state: STARTED(1)/STOPPED(0) */
1277 char name[RTE_EVENTDEV_NAME_MAX_LEN];
1278 /**< Unique identifier name */
1279 } __rte_cache_aligned;
1281 /** @internal The data structure associated with each event device. */
1282 struct rte_eventdev {
1283 event_enqueue_t enqueue;
1284 /**< Pointer to PMD enqueue function. */
1285 event_enqueue_burst_t enqueue_burst;
1286 /**< Pointer to PMD enqueue burst function. */
1287 event_enqueue_burst_t enqueue_new_burst;
1288 /**< Pointer to PMD enqueue burst function(op new variant) */
1289 event_enqueue_burst_t enqueue_forward_burst;
1290 /**< Pointer to PMD enqueue burst function(op forward variant) */
1291 event_dequeue_t dequeue;
1292 /**< Pointer to PMD dequeue function. */
1293 event_dequeue_burst_t dequeue_burst;
1294 /**< Pointer to PMD dequeue burst function. */
1295 event_tx_adapter_enqueue txa_enqueue;
1296 /**< Pointer to PMD eth Tx adapter enqueue function. */
1297 struct rte_eventdev_data *data;
1298 /**< Pointer to device data */
1299 struct rte_eventdev_ops *dev_ops;
1300 /**< Functions exported by PMD */
1301 struct rte_device *dev;
1302 /**< Device info. supplied by probing */
1305 uint8_t attached : 1;
1306 /**< Flag indicating the device is attached */
1307 } __rte_cache_aligned;
1309 extern struct rte_eventdev *rte_eventdevs;
1310 /** @internal The pool of rte_eventdev structures. */
1312 static __rte_always_inline uint16_t
1313 __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
1314 const struct rte_event ev[], uint16_t nb_events,
1315 const event_enqueue_burst_t fn)
1317 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1319 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1320 if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
1325 if (port_id >= dev->data->nb_ports) {
1331 * Allow zero-cost non-burst mode routine invocation if the application
1332 * requests nb_events as a compile-time constant of one
1335 return (*dev->enqueue)(dev->data->ports[port_id], ev);
1337 return fn(dev->data->ports[port_id], ev, nb_events);
1341 * Enqueue a burst of events objects or an event object supplied in *rte_event*
1342 * structure on an event device designated by its *dev_id* through the event
1343 * port specified by *port_id*. Each event object specifies the event queue on
1344 * which it will be enqueued.
1346 * The *nb_events* parameter is the number of event objects to enqueue which are
1347 * supplied in the *ev* array of *rte_event* structure.
1349 * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
1350 * enqueued to the same port that their associated events were dequeued from.
1352 * The rte_event_enqueue_burst() function returns the number of
1353 * event objects it actually enqueued. A return value equal to *nb_events*
1354 * means that all event objects have been enqueued.
1357 * The identifier of the device.
1359 * The identifier of the event port.
1361 * Points to an array of *nb_events* objects of type *rte_event* structure
1362 * which contain the event object enqueue operations to be processed.
1364 * The number of event objects to enqueue, typically number of
1365 * rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
1366 * available for this port.
1369 * The number of event objects actually enqueued on the event device. The
1370 * return value can be less than the value of the *nb_events* parameter when
1371 * the event device's queue is full or if invalid parameters are specified in a
1372 * *rte_event*. If the return value is less than *nb_events*, the remaining
1373 * events at the end of ev[] are not consumed and the caller has to take care
1374 * of them, and rte_errno is set accordingly. Possible errno values include:
1375 * - EINVAL The port ID is invalid, device ID is invalid, an event's queue
1376 * ID is invalid, or an event's sched type doesn't match the
1377 * capabilities of the destination queue.
1378 * - ENOSPC The event port was backpressured and unable to enqueue
1379 * one or more events. This error code is only applicable to
1381 * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
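 *
 * Illustrative sketch: injecting one new CPU-generated event that carries an
 * application object. dev_id, port_id, flow and obj are placeholders, the
 * event metadata is zero-initialized before the relevant fields are set, and
 * retry on backpressure is omitted.
 *
 *     struct rte_event ev = {0};
 *
 *     ev.op = RTE_EVENT_OP_NEW;
 *     ev.event_type = RTE_EVENT_TYPE_CPU;
 *     ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *     ev.queue_id = 0;
 *     ev.flow_id = flow;
 *     ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *     ev.event_ptr = obj;
 *     if (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1) {
 *             // enqueue back-pressured or rejected: inspect rte_errno
 *     }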
1383 static inline uint16_t
1384 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
1385 const struct rte_event ev[], uint16_t nb_events)
1387 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1389 return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
1390 dev->enqueue_burst);
1394 * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_NEW* on
1395 * an event device designated by its *dev_id* through the event port specified
1398 * Provides the same functionality as rte_event_enqueue_burst(), except that
1399 * the application can use this API when all objects in the burst contain
1400 * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
1401 * function can provide an additional hint to the PMD and optimize if possible.
1403 * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
1404 * has any event object of operation type != RTE_EVENT_OP_NEW.
1407 * The identifier of the device.
1409 * The identifier of the event port.
1411 * Points to an array of *nb_events* objects of type *rte_event* structure
1412 * which contain the event object enqueue operations to be processed.
1414 * The number of event objects to enqueue, typically number of
1415 * rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
1416 * available for this port.
1419 * The number of event objects actually enqueued on the event device. The
1420 * return value can be less than the value of the *nb_events* parameter when
1421 * the event device's queue is full or if invalid parameters are specified in a
1422 * *rte_event*. If the return value is less than *nb_events*, the remaining
1423 * events at the end of ev[] are not consumed and the caller has to take care
1424 * of them, and rte_errno is set accordingly. Possible errno values include:
1425 * - EINVAL The port ID is invalid, device ID is invalid, an event's queue
1426 * ID is invalid, or an event's sched type doesn't match the
1427 * capabilities of the destination queue.
1428 * - ENOSPC The event port was backpressured and unable to enqueue
1429 * one or more events. This error code is only applicable to
1431 * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
1432 * @see rte_event_enqueue_burst()
1434 static inline uint16_t
1435 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
1436 const struct rte_event ev[], uint16_t nb_events)
1438 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1440 return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
1441 dev->enqueue_new_burst);
1445 * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_FORWARD*
1446 * on an event device designated by its *dev_id* through the event port
1447 * specified by *port_id*.
1449 * Provides the same functionality as rte_event_enqueue_burst(), except that
1450 * the application can use this API when all objects in the burst contain
1451 * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
1452 * function can provide an additional hint to the PMD and optimize if possible.
1454 * The rte_event_enqueue_forward_burst() result is undefined if the enqueue
1455 * burst has any event object of operation type != RTE_EVENT_OP_FORWARD.
1458 * The identifier of the device.
1460 * The identifier of the event port.
1462 * Points to an array of *nb_events* objects of type *rte_event* structure
1463 * which contain the event object enqueue operations to be processed.
1465 * The number of event objects to enqueue, typically number of
1466 * rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
1467 * available for this port.
1470 * The number of event objects actually enqueued on the event device. The
1471 * return value can be less than the value of the *nb_events* parameter when
1472 * the event device's queue is full or if invalid parameters are specified in a
1473 * *rte_event*. If the return value is less than *nb_events*, the remaining
1474 * events at the end of ev[] are not consumed and the caller has to take care
1475 * of them, and rte_errno is set accordingly. Possible errno values include:
1476 * - EINVAL The port ID is invalid, device ID is invalid, an event's queue
1477 * ID is invalid, or an event's sched type doesn't match the
1478 * capabilities of the destination queue.
1479 * - ENOSPC The event port was backpressured and unable to enqueue
1480 * one or more events. This error code is only applicable to
1482 * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
1483 * @see rte_event_enqueue_burst()
1485 static inline uint16_t
1486 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
1487 const struct rte_event ev[], uint16_t nb_events)
1489 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1491 return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
1492 dev->enqueue_forward_burst);
1496 * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
1498 * If the device is configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT flag
1499 * then the application can use this function to convert a timeout value in
1500 * nanoseconds to the implementation-specific timeout value supplied in
1501 * rte_event_dequeue_burst()
1504 * The identifier of the device.
1506 * Wait time in nanoseconds
1507 * @param[out] timeout_ticks
1508 * Value for the *timeout_ticks* parameter in rte_event_dequeue_burst()
1512 * - -ENOTSUP if the device doesn't support timeouts
1513 * - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL
1514 * - other values < 0 on failure.
1516 * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
1517 * @see rte_event_dev_configure()
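 *
 * Illustrative sketch (device id 0 assumed, configured with
 * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT): converting a 100 microsecond wait
 * into the device-specific value expected by rte_event_dequeue_burst().
 *
 *     uint64_t timeout_ticks = 0;
 *
 *     if (rte_event_dequeue_timeout_ticks(0, 100 * 1000, &timeout_ticks) < 0)
 *             timeout_ticks = 0; // fall back to no-wait dequeues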
1521 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1522 uint64_t *timeout_ticks);
1525 * Dequeue a burst of event objects or an event object from the event port
1526 * designated by its *event_port_id*, on an event device designated
1529 * rte_event_dequeue_burst() does not dictate the specifics of the scheduling
1530 * algorithm, as each eventdev driver may have different criteria for scheduling
1531 * an event. However, in general, from an application perspective the scheduler
1532 * may use the following scheme to dispatch an event to the port.
1534 * 1) Selection of event queue based on
1535 * a) The list of event queues linked to the event port.
1536 * b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
1537 * queue selection from list is based on event queue priority relative to
1538 * other event queue supplied as *priority* in rte_event_queue_setup()
1539 * c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
1540 * queue selection from the list is based on event priority supplied as
1541 * *priority* in rte_event_enqueue_burst()
1542 * 2) Selection of event
1543 * a) The number of flows available in selected event queue.
1544 * b) Schedule type method associated with the event
1546 * The *nb_events* parameter is the maximum number of event objects to dequeue
1547 * which are returned in the *ev* array of *rte_event* structures.
1549 * The rte_event_dequeue_burst() function returns the number of event objects
1550 * it actually dequeued. A return value equal to *nb_events* means that all
1551 * event objects have been dequeued.
1553 * The number of events dequeued is the number of scheduler contexts held by
1554 * this port. These contexts are automatically released in the next
1555 * rte_event_dequeue_burst() invocation if the port supports implicit
1556 * releases; alternatively, they can be released early by invoking
1557 * rte_event_enqueue_burst() with the RTE_EVENT_OP_RELEASE operation.
1559 * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
1560 * enqueued to the same port that their associated events were dequeued from.
1563 * The identifier of the device.
1565 * The identifier of the event port.
1567 * Points to an array of *nb_events* objects of type *rte_event* structure
1568 * for output to be populated with the dequeued event objects.
1570 *   The maximum number of event objects to dequeue, typically up to the value
1571 *   returned by rte_event_port_dequeue_depth() for this port.
1573 * @param timeout_ticks
1574 * - 0 no-wait, returns immediately if there is no event.
1575 *   - >0 wait for an event. If the device is configured with
1576 *   RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT, then this function waits until at
1577 *   least one event is available or *timeout_ticks* time has elapsed.
1578 *   If the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT,
1579 *   then this function waits until an event is available or the
1580 *   *dequeue_timeout_ns* value previously supplied to
1581 *   rte_event_dev_configure() has elapsed.
1584 * The number of event objects actually dequeued from the port. The return
1585 *   value can be less than the value of the *nb_events* parameter when fewer
1586 *   than *nb_events* events are available in the event port's queue.
1588 * @see rte_event_port_dequeue_depth()
1590 static inline uint16_t
1591 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
1592 uint16_t nb_events, uint64_t timeout_ticks)
1594 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1596 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1597 if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
1602 if (port_id >= dev->data->nb_ports) {
1609 	/* Allow zero-cost non-burst mode routine invocation if the application
1610 	 * requests nb_events as a compile-time constant of one. */
	if (nb_events == 1)
1613 		return (*dev->dequeue)(
1614 			dev->data->ports[port_id], ev, timeout_ticks);
	else
1616 		return (*dev->dequeue_burst)(
1617 			dev->data->ports[port_id], ev, nb_events, timeout_ticks);
}
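/*
 * Illustrative usage sketch (not part of the eventdev API): a consumer loop
 * that drains a port and, on devices without implicit release, hands the held
 * scheduler contexts back explicitly with RTE_EVENT_OP_RELEASE enqueued on the
 * same port. The burst size and process_event() are assumptions of the example.
 *
 *	struct rte_event ev[32];
 *	uint16_t i, nb;
 *
 *	nb = rte_event_dequeue_burst(dev_id, port_id, ev, 32, timeout_ticks);
 *	for (i = 0; i < nb; i++) {
 *		process_event(&ev[i]);
 *		ev[i].op = RTE_EVENT_OP_RELEASE;
 *	}
 *	if (nb > 0)
 *		rte_event_enqueue_burst(dev_id, port_id, ev, nb);
 */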
1622 * Link multiple source event queues supplied in *queues* to the destination
1623 * event port designated by its *port_id* with associated service priority
1624 * supplied in *priorities* on the event device designated by its *dev_id*.
1626 * The link establishment shall enable the event port *port_id* to receive
1627 * events from the specified event queue(s) supplied in *queues*.
1629 * An event queue may link to one or more event ports.
1630 * The number of links that can be established from an event queue to an event
1631 * port is implementation defined.
1633 * Event queue to event port links can be changed at runtime without
1634 * re-configuring the device, to support scaling and to reduce the latency of
1635 * critical work by establishing links with more event ports at runtime.
1639 * The identifier of the device.
1642 * Event port identifier to select the destination port to link.
1645 * Points to an array of *nb_links* event queues to be linked
1646 * to the event port.
1647 * NULL value is allowed, in which case this function links all the configured
1648 * event queues (*nb_event_queues* as previously supplied to
1649 * rte_event_dev_configure()) to the event port *port_id*.
1652 * Points to an array of *nb_links* service priorities associated with each
1653 * event queue linked to the event port.
1654 * The priority defines the event port's servicing priority for the
1655 * event queue, which may be ignored by an implementation.
1656 * The requested priority should be in the range of
1657 * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
1658 * The implementation shall normalize the requested priority to an
1659 * implementation-supported priority value.
1660 * NULL value is allowed, in which case this function links the event queues
1661 * with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority.
1664 * The number of links to establish. This parameter is ignored if queues is NULL.
1668 * The number of links actually established. The return value can be less than
1669 * the value of the *nb_links* parameter when the implementation has a
1670 * limitation on a specific queue-to-port link establishment or if invalid
1671 * parameters are specified in *queues*.
1672 * If the return value is less than *nb_links*, the remaining links at the end
1673 * of queues[] are not established, and the caller has to take care of them.
1674 * If the return value is less than *nb_links*, the implementation shall update
1675 * rte_errno accordingly. Possible rte_errno values are:
1676 * (EDQUOT) Quota exceeded (the application tried to link a queue configured
1677 * with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
1678 * (EINVAL) Invalid parameter
1682 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
1683 		const uint8_t queues[], const uint8_t priorities[], uint16_t nb_links);
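/*
 * Illustrative usage sketch (not part of the eventdev API): link two event
 * queues to one port, servicing queue 0 ahead of queue 1. The queue/port ids
 * are assumptions of the example; link priorities may be ignored by
 * implementations that do not support them.
 *
 *	const uint8_t queues[] = { 0, 1 };
 *	const uint8_t prios[] = { RTE_EVENT_DEV_PRIORITY_HIGHEST,
 *				  RTE_EVENT_DEV_PRIORITY_NORMAL };
 *
 *	if (rte_event_port_link(dev_id, port_id, queues, prios, 2) != 2)
 *		rte_panic("linking queues to port failed\n");
 */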
1687 * Unlink multiple source event queues supplied in *queues* from the destination
1688 * event port designated by its *port_id* on the event device designated
1691 * The unlink call issues an async request to disable the event port *port_id*
1692 * from receiving events from the specified event queue(s) supplied in *queues*.
1693 * Event queue to event port unlinks can be requested at runtime without
1694 * re-configuring the device.
1696 * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
1699 * The identifier of the device.
1702 * Event port identifier to select the destination port to unlink.
1705 * Points to an array of *nb_unlinks* event queues to be unlinked
1706 * from the event port.
1707 * NULL value is allowed, in which case this function unlinks all the
1708 * event queue(s) from the event port *port_id*.
1711 * The number of unlinks to establish. This parameter is ignored if queues is NULL.
1715 * The number of unlinks successfully requested. The return value can be less
1716 * than the value of the *nb_unlinks* parameter when the implementation has a
1717 * limitation on a specific queue-to-port unlink establishment or
1718 * if invalid parameters are specified.
1719 * If the return value is less than *nb_unlinks*, the remaining queues at the
1720 * end of queues[] are not unlinked, and the caller has to take care of them.
1721 * If the return value is less than *nb_unlinks*, the implementation shall
1722 * update rte_errno accordingly. Possible rte_errno values are:
1723 * (EINVAL) Invalid parameter
1726 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
1727 uint8_t queues[], uint16_t nb_unlinks);
1730 * Returns the number of unlinks in progress.
1732 * This function provides the application with a method to detect when an
1733 * unlink has been completed by the implementation.
1735 * @see rte_event_port_unlink() to issue unlink requests.
1738 * The identifier of the device.
1741 * Event port identifier to select port to check for unlinks in progress.
1744 * The number of unlinks that are in progress. A return of zero indicates that
1745 * there are no outstanding unlink requests. A positive return value indicates
1746 * the number of unlinks that are in progress, but are not yet complete.
1747 * A negative return value indicates an error, -EINVAL indicates an invalid
1748 * parameter passed for *dev_id* or *port_id*.
1751 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);
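/*
 * Illustrative usage sketch (not part of the eventdev API): request that
 * queue 1 be unlinked from a port and, because the unlink is asynchronous,
 * poll until no unlinks remain in progress before e.g. reconfiguring the
 * queue. The ids are assumptions of the example; a production loop would
 * bound the wait.
 *
 *	uint8_t q = 1;
 *
 *	if (rte_event_port_unlink(dev_id, port_id, &q, 1) != 1)
 *		rte_panic("unlink request failed\n");
 *	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *		rte_pause();
 */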
1754 * Retrieve the list of source event queues and their associated service
1755 * priorities linked to the destination event port designated by its *port_id*
1756 * on the event device designated by its *dev_id*.
1759 * The identifier of the device.
1762 * Event port identifier.
1764 * @param[out] queues
1765 * Points to an array of *queues* for output.
1766 * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
1767 * store the event queue(s) linked with event port *port_id*
1769 * @param[out] priorities
1770 * Points to an array of *priorities* for output.
1771 * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
1772 *   store the service priority associated with each event queue linked to the event port *port_id*.
1775 *   The number of links established on the event port designated by its *port_id*.
1781 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1782 uint8_t queues[], uint8_t priorities[]);
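/*
 * Illustrative usage sketch (not part of the eventdev API): print the queues
 * currently linked to a port together with their service priorities. Assumes
 * <stdio.h> is available and dev_id/port_id are in scope.
 *
 *	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
 *	uint8_t prios[RTE_EVENT_MAX_QUEUES_PER_DEV];
 *	int i, nb_links;
 *
 *	nb_links = rte_event_port_links_get(dev_id, port_id, queues, prios);
 *	for (i = 0; i < nb_links; i++)
 *		printf("port %u <- queue %u (priority %u)\n",
 *			(unsigned int)port_id, (unsigned int)queues[i],
 *			(unsigned int)prios[i]);
 */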
1785 * Retrieve the service ID of the event dev. If the event dev doesn't use
1786 * an rte_service function, this function returns -ESRCH.
1789 * The identifier of the device.
1791 * @param [out] service_id
1792 * A pointer to a uint32_t, to be filled in with the service id.
1796 * - <0: Error code on failure, if the event dev doesn't use a rte_service
1797 * function, this function returns -ESRCH.
1800 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
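/*
 * Illustrative usage sketch (not part of the eventdev API): software event
 * devices typically expose their scheduling work as an rte_service. The
 * sketch maps that service to a service lcore and starts it, using the
 * rte_service.h API; *service_lcore_id* is an assumption of the example and
 * the rest of the service-core setup is omitted.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		rte_service_map_lcore_set(service_id, service_lcore_id, 1);
 *		rte_service_runstate_set(service_id, 1);
 *	}
 */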
1803 * Dump internal information about *dev_id* to the FILE* provided in *f*.
1806 * The identifier of the device.
1809 * A pointer to a file for output
1816 rte_event_dev_dump(uint8_t dev_id, FILE *f);
1818 /** Maximum name length for extended statistics counters */
1819 #define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
1822 * Selects the component of the eventdev to retrieve statistics from.
1824 enum rte_event_dev_xstats_mode {
1825 RTE_EVENT_DEV_XSTATS_DEVICE,
1826 RTE_EVENT_DEV_XSTATS_PORT,
1827 RTE_EVENT_DEV_XSTATS_QUEUE,
1831 * A name-key lookup element for extended statistics.
1833 * This structure is used to map between names and ID numbers
1834 * for extended eventdev statistics.
1836 struct rte_event_dev_xstats_name {
1837 char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
1841 * Retrieve names of extended statistics of an event device.
1844 * The identifier of the event device.
1846 * The mode of statistics to retrieve. Choices include the device statistics,
1847 * port statistics or queue statistics.
1848 * @param queue_port_id
1849 * Used to specify the port or queue number in queue or port mode, and is
1850 * ignored in device mode.
1851 * @param[out] xstats_names
1852 * Block of memory to insert names into. Must be at least size in capacity.
1853 * If set to NULL, function returns required capacity.
1855 * Block of memory to insert ids into. Must be at least size in capacity.
1856 * If set to NULL, function returns required capacity. The id values returned
1857 * can be passed to *rte_event_dev_xstats_get* to select statistics.
1859 * Capacity of xstats_names (number of names).
1861 * - positive value lower or equal to size: success. The return value
1862 * is the number of entries filled in the stats table.
1863 * - positive value higher than size: error, the given statistics table
1864 * is too small. The return value corresponds to the size that should
1865 * be given to succeed. The entries in the table are not valid and
1866 * shall not be used by the caller.
1867 * - negative value on error:
1868 * -ENODEV for invalid *dev_id*
1869 * -EINVAL for invalid mode, queue port or id parameters
1870 * -ENOTSUP if the device doesn't support this function.
1873 rte_event_dev_xstats_names_get(uint8_t dev_id,
1874 enum rte_event_dev_xstats_mode mode,
1875 uint8_t queue_port_id,
1876 		struct rte_event_dev_xstats_name *xstats_names, unsigned int *ids, unsigned int size);
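/*
 * Illustrative usage sketch (not part of the eventdev API): the usual
 * two-call pattern for enumerating device-level xstats - query the required
 * capacity with NULL output arrays, then allocate and fetch. Assumes
 * <stdlib.h>; error handling is minimal.
 *
 *	int n = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *	if (n > 0) {
 *		struct rte_event_dev_xstats_name *names =
 *				malloc(n * sizeof(*names));
 *		unsigned int *ids = malloc(n * sizeof(*ids));
 *
 *		rte_event_dev_xstats_names_get(dev_id,
 *				RTE_EVENT_DEV_XSTATS_DEVICE, 0, names, ids, n);
 *		// names[i].name and ids[i] now describe each counter
 *		free(names);
 *		free(ids);
 *	}
 */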
1881 * Retrieve extended statistics of an event device.
1884 * The identifier of the device.
1886 * The mode of statistics to retrieve. Choices include the device statistics,
1887 * port statistics or queue statistics.
1888 * @param queue_port_id
1889 * Used to specify the port or queue number in queue or port mode, and is
1890 * ignored in device mode.
1892 *   The id numbers of the stats to get. The ids can be obtained from the stat
1893 *   position in the stat list from rte_event_dev_xstats_names_get(), or
1894 *   by using rte_event_dev_xstats_by_name_get().
1895 * @param[out] values
1896 *   The values for each stat requested by ID.
1898 * The number of stats requested
1900 * - positive value: number of stat entries filled into the values array
1901 * - negative value on error:
1902 * -ENODEV for invalid *dev_id*
1903 * -EINVAL for invalid mode, queue port or id parameters
1904 * -ENOTSUP if the device doesn't support this function.
1907 rte_event_dev_xstats_get(uint8_t dev_id,
1908 enum rte_event_dev_xstats_mode mode,
1909 uint8_t queue_port_id,
1910 const unsigned int ids[],
1911 uint64_t values[], unsigned int n);
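/*
 * Illustrative usage sketch (not part of the eventdev API): read the current
 * values of two counters whose ids were previously obtained from
 * rte_event_dev_xstats_names_get(). The id values and the use of port mode
 * are assumptions of the example; <inttypes.h> provides PRIu64.
 *
 *	const unsigned int ids[] = { 0, 1 };
 *	uint64_t values[2];
 *
 *	if (rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_PORT,
 *			port_id, ids, values, 2) == 2)
 *		printf("stat[0]=%" PRIu64 " stat[1]=%" PRIu64 "\n",
 *			values[0], values[1]);
 */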
1914 * Retrieve the value of a single stat by requesting it by name.
1917 * The identifier of the device
1919 * The stat name to retrieve
1921 * If non-NULL, the numerical id of the stat will be returned, so that further
1922 *   requests for the stat can be made using rte_event_dev_xstats_get(), which
1923 *   will be faster as it doesn't need to scan the list of names for the stat.
1924 * If the stat cannot be found, the id returned will be (unsigned)-1.
1926 * - positive value or zero: the stat value
1927 * - negative value: -EINVAL if stat not found, -ENOTSUP if not supported.
1930 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name, unsigned int *id);
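/*
 * Illustrative usage sketch (not part of the eventdev API): read a single
 * counter by name and cache its id for cheaper subsequent lookups via
 * rte_event_dev_xstats_get(). The counter name is an assumption of the
 * example - valid names are device specific and can be listed with
 * rte_event_dev_xstats_names_get().
 *
 *	unsigned int id = (unsigned int)-1;
 *	uint64_t val;
 *
 *	val = rte_event_dev_xstats_by_name_get(dev_id, "dev_rx_total", &id);
 *	if (id != (unsigned int)-1)
 *		printf("dev_rx_total = %" PRIu64 "\n", val);
 */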
1934 * Reset the values of the xstats of the selected component in the device.
1937 * The identifier of the device
1939 * The mode of the statistics to reset. Choose from device, queue or port.
1940 * @param queue_port_id
1941 * The queue or port to reset. 0 and positive values select ports and queues,
1942 * while -1 indicates all ports or queues.
1944 * Selects specific statistics to be reset. When NULL, all statistics selected
1945 *   by *mode* will be reset. If non-NULL, must point to an array of at least *nb_ids* entries.
1948 * The number of ids available from the *ids* array. Ignored when ids is NULL.
1950 * - zero: successfully reset the statistics to zero
1951 * - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported.
1954 rte_event_dev_xstats_reset(uint8_t dev_id,
1955 enum rte_event_dev_xstats_mode mode,
1956 int16_t queue_port_id,
1957 		const uint32_t ids[], uint32_t nb_ids);
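/*
 * Illustrative usage sketch (not part of the eventdev API): reset every
 * device-level counter, e.g. between benchmark iterations. Passing a NULL
 * *ids* array resets all statistics selected by the mode; the queue_port_id
 * value is not relevant for device-level statistics.
 *
 *	rte_event_dev_xstats_reset(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
 *			0, NULL, 0);
 */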
1961 * Trigger the eventdev self test.
1964 * The identifier of the device
1966 * - 0: Selftest successful
1967 * - -ENOTSUP if the device doesn't support selftest
1968 * - other values < 0 on failure.
1970 int rte_event_dev_selftest(uint8_t dev_id);
1976 #endif /* _RTE_EVENTDEV_H_ */