#endif
#include <rte_common.h>
-#include <rte_pci.h>
-#include <rte_mbuf.h>
+#include <rte_memory.h>
+
+struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)
*/
struct rte_event_dev_info {
const char *driver_name; /**< Event driver name */
- struct rte_pci_device *pci_dev; /**< PCI information */
+ struct rte_device *dev; /**< Device information */
uint32_t min_dequeue_timeout_ns;
/**< Minimum supported global dequeue timeout(ns) by this device */
uint32_t max_dequeue_timeout_ns;
* @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
*/
int32_t nb_events_limit;
- /**< Applies to *closed system* event dev only. This field indicates a
- * limit to ethdev-like devices to limit the number of events injected
- * into the system to not overwhelm core-to-core events.
- * This value cannot exceed the *max_num_events* which previously
- * provided in rte_event_dev_info_get()
+ /**< In a *closed system* this field is the limit on the maximum number of
+ * events that can be inflight in the eventdev at a given time. The
+ * limit is required to ensure that the finite space in a closed system
+ * is not overwhelmed. The value cannot exceed the *max_num_events*
+ * as provided by rte_event_dev_info_get().
+ * This value should be set to -1 for an *open system*.
*/
uint8_t nb_event_queues;
/**< Number of event queues to configure on this device.
 * This value cannot exceed the *max_event_queues* which was previously
 * provided by rte_event_dev_info_get()
*/
- uint8_t nb_event_port_dequeue_depth;
+ uint32_t nb_event_port_dequeue_depth;
 /**< Maximum number of events that can be dequeued at a time from an
* event port by this device.
* This value cannot exceed the *max_event_port_dequeue_depth*
* can have a lower threshold so as not to overwhelm the device,
* while ports used for worker pools can have a higher threshold.
* This value cannot exceed the *nb_events_limit*
- * which previously supplied to rte_event_dev_configure()
+ * which was previously supplied to rte_event_dev_configure().
+ * This should be set to '-1' for an *open system*.
*/
- uint8_t dequeue_depth;
+ uint16_t dequeue_depth;
/**< Configure number of bulk dequeues for this event port.
* This value cannot exceed the *nb_event_port_dequeue_depth*
 * which was previously supplied to rte_event_dev_configure()
*/
- uint8_t enqueue_depth;
+ uint16_t enqueue_depth;
/**< Configure number of bulk enqueues for this event port.
* This value cannot exceed the *nb_event_port_enqueue_depth*
 * which was previously supplied to rte_event_dev_configure()
};
};
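
As a usage sketch of the fields above (not part of this patch): the queue/port counts and depths below are assumptions for the example, and a real application should derive them from the limits reported by rte_event_dev_info_get().

#include <rte_eventdev.h>

static int
setup_eventdev(uint8_t dev_id)	/* hypothetical helper for illustration */
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config config = {0};
	struct rte_event_port_conf port_conf = {0};
	uint8_t q;

	if (rte_event_dev_info_get(dev_id, &info) < 0)
		return -1;

	/* Closed system: bound in-flight events by what the device supports */
	config.nb_events_limit = info.max_num_events;
	config.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
	config.nb_event_queues = 2;	/* assumption for the example */
	config.nb_event_ports = 1;	/* assumption for the example */
	config.nb_event_queue_flows = info.max_event_queue_flows;
	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;

	if (rte_event_dev_configure(dev_id, &config) < 0)
		return -1;

	for (q = 0; q < config.nb_event_queues; q++)
		if (rte_event_queue_setup(dev_id, q, NULL) < 0)
			return -1;

	/* Port depths must not exceed the values configured above */
	port_conf.new_event_threshold = config.nb_events_limit;
	port_conf.dequeue_depth = 4;	/* assumption for the example */
	port_conf.enqueue_depth = 4;	/* assumption for the example */

	return rte_event_port_setup(dev_id, 0, &port_conf);
}
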
+
+struct rte_eventdev_driver;
struct rte_eventdev_ops;
struct rte_eventdev;
uint16_t nb_events, uint64_t timeout_ticks);
/**< @internal Dequeue burst of events from port of a device */
+#define RTE_EVENTDEV_NAME_MAX_LEN (64)
+/**< @internal Max length of name of event PMD */
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_eventdev_data {
+ int socket_id;
+ /**< Socket ID where memory is allocated */
+ uint8_t dev_id;
+ /**< Device ID for this instance */
+ uint8_t nb_queues;
+ /**< Number of event queues. */
+ uint8_t nb_ports;
+ /**< Number of event ports. */
+ void **ports;
+ /**< Array of pointers to ports. */
+ uint8_t *ports_dequeue_depth;
+ /**< Array of port dequeue depth. */
+ uint8_t *ports_enqueue_depth;
+ /**< Array of port enqueue depth. */
+ uint8_t *queues_prio;
+ /**< Array of queue priority. */
+ uint16_t *links_map;
+ /**< Memory to store queues to port connections. */
+ void *dev_private;
+ /**< PMD-specific private data */
+ uint32_t event_dev_cap;
+ /**< Event device capabilities (RTE_EVENT_DEV_CAP_*) */
+ struct rte_event_dev_config dev_conf;
+ /**< Configuration applied to device. */
+
+ RTE_STD_C11
+ uint8_t dev_started : 1;
+ /**< Device state: STARTED(1)/STOPPED(0) */
+
+ char name[RTE_EVENTDEV_NAME_MAX_LEN];
+ /**< Unique identifier name */
+} __rte_cache_aligned;
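
Because this structure holds data only (no function pointers), a PMD can place it in a named memzone so primary and secondary processes share a single copy. The sketch below only illustrates that layout decision; the helper name and memzone usage are assumptions, not an API introduced by this patch.

#include <string.h>
#include <rte_eal.h>
#include <rte_memzone.h>
#include <rte_eventdev.h>

/* Hypothetical helper: find or create the shared per-device data. */
static struct rte_eventdev_data *
eventdev_data_get(const char *mz_name, int socket_id)
{
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
		if (mz != NULL)
			memset(mz->addr, 0, sizeof(struct rte_eventdev_data));
	} else {
		mz = rte_memzone_lookup(mz_name);	/* secondary process attaches */
	}

	return mz != NULL ? mz->addr : NULL;
}
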
/** @internal The data structure associated with each event device. */
struct rte_eventdev {
event_dequeue_burst_t dequeue_burst;
/**< Pointer to PMD dequeue burst function. */
+ struct rte_eventdev_data *data;
+ /**< Pointer to device data */
+ const struct rte_eventdev_ops *dev_ops;
+ /**< Functions exported by PMD */
+ struct rte_device *dev;
+ /**< Device info. supplied by probing */
+ const struct rte_eventdev_driver *driver;
+ /**< Driver for this device */
+
+ RTE_STD_C11
+ uint8_t attached : 1;
+ /**< Flag indicating the device is attached */
} __rte_cache_aligned;
+extern struct rte_eventdev *rte_eventdevs;
+/** @internal The pool of rte_eventdev structures. */
+
/**
* Schedule one or more events in the event dev.
* @param dev_id
* The identifier of the device.
*/
-void
-rte_event_schedule(uint8_t dev_id);
+static inline void
+rte_event_schedule(uint8_t dev_id)
+{
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+ if (*dev->schedule)
+ (*dev->schedule)(dev);
+}
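
For PMDs that implement scheduling in software, an application typically dedicates one lcore to calling this function in a tight loop (e.g. launched with rte_eal_remote_launch()). A minimal sketch; the 'done' flag and helper name are assumptions for the example:

#include <stdbool.h>
#include <stdint.h>
#include <rte_eventdev.h>

static volatile bool done;	/* assumed termination flag, set elsewhere */

static int
scheduler_loop(void *arg)
{
	uint8_t dev_id = *(uint8_t *)arg;

	/* Effectively a no-op for devices whose PMD leaves 'schedule' NULL */
	while (!done)
		rte_event_schedule(dev_id);
	return 0;
}
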
/**
* Enqueue a burst of events objects or an event object supplied in *rte_event*
*
* @see rte_event_port_enqueue_depth()
*/
-uint16_t
+static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
- const struct rte_event ev[], uint16_t nb_events);
+ const struct rte_event ev[], uint16_t nb_events)
+{
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ /*
+ * Allow zero-cost invocation of the non-burst path when the application
+ * passes nb_events as the compile-time constant 1
+ */
+ if (nb_events == 1)
+ return (*dev->enqueue)(
+ dev->data->ports[port_id], ev);
+ else
+ return (*dev->enqueue_burst)(
+ dev->data->ports[port_id], ev, nb_events);
+}
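
As a usage sketch (relying on the rte_event layout and the RTE_EVENT_OP_*/RTE_SCHED_TYPE_* values defined elsewhere in this header), a producer injecting one packet as a new event might look like this; the helper name and the port/queue IDs are hypothetical:

#include <rte_eventdev.h>
#include <rte_mbuf.h>

#define PROD_PORT_ID	0	/* hypothetical producer port */
#define WORK_QUEUE_ID	0	/* hypothetical first-stage queue */

static inline int
inject_pkt(uint8_t dev_id, struct rte_mbuf *m)
{
	struct rte_event ev = {0};

	ev.op = RTE_EVENT_OP_NEW;		/* new injection into the eventdev */
	ev.queue_id = WORK_QUEUE_ID;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;	/* per-flow atomic scheduling */
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.flow_id = m->hash.rss;		/* flow key for atomicity/ordering */
	ev.mbuf = m;

	/* nb_events == 1 takes the non-burst fast path shown above */
	return rte_event_enqueue_burst(dev_id, PROD_PORT_ID, &ev, 1) == 1 ? 0 : -1;
}
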
/**
* Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
*
* @return
* - 0 on success.
- * - <0 on failure.
+ * - -ENOTSUP if the device doesn't support timeouts
+ * - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL
+ * - other values < 0 on failure.
*
* @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
* @see rte_event_dev_configure()
* - 0 no-wait, returns immediately if there is no event.
* - >0 wait for the event, if the device is configured with
* RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
- * the event available or *timeout_ticks* time.
+ * at least one event is available or *timeout_ticks* time.
* if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
 * then this function will wait until an event is available or
* *dequeue_timeout_ns* ns which was previously supplied to
*
* @see rte_event_port_dequeue_depth()
*/
-uint16_t
+static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
- uint16_t nb_events, uint64_t timeout_ticks);
+ uint16_t nb_events, uint64_t timeout_ticks)
+{
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ /*
+ * Allow zero-cost invocation of the non-burst path when the application
+ * passes nb_events as the compile-time constant 1
+ */
+ if (nb_events == 1)
+ return (*dev->dequeue)(
+ dev->data->ports[port_id], ev, timeout_ticks);
+ else
+ return (*dev->dequeue_burst)(
+ dev->data->ports[port_id], ev, nb_events,
+ timeout_ticks);
+}
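
A worker built on this function might look as follows; the burst size, queue ID and termination flag are assumptions for the sketch, and a production worker would also retry partially accepted enqueues:

#include <stdbool.h>
#include <rte_eventdev.h>

#define BURST_SIZE	16	/* assumption; must not exceed the port's dequeue_depth */
#define TX_QUEUE_ID	1	/* hypothetical next-stage queue */

static volatile bool done;	/* assumed termination flag */

static int
worker_loop(void *arg)
{
	const uint8_t dev_id = 0;	/* assumption for the example */
	const uint8_t port_id = *(const uint8_t *)arg;
	struct rte_event evs[BURST_SIZE];

	while (!done) {
		uint16_t i, nb;

		/* timeout_ticks == 0 requests no-wait; a value derived from
		 * rte_event_dequeue_timeout_ticks() would block instead. */
		nb = rte_event_dequeue_burst(dev_id, port_id, evs, BURST_SIZE, 0);

		for (i = 0; i < nb; i++) {
			/* ... process evs[i].mbuf ... */
			evs[i].op = RTE_EVENT_OP_FORWARD;	/* keep the event in-flight */
			evs[i].queue_id = TX_QUEUE_ID;
		}
		if (nb != 0)
			rte_event_enqueue_burst(dev_id, port_id, evs, nb);
	}
	return 0;
}
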
/**
* Link multiple source event queues supplied in *queues* to the destination
* with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
*
* @param nb_links
- * The number of links to establish
+ * The number of links to establish. This parameter is ignored if queues is
+ * NULL.
*
* @return
* The number of links actually established. The return value can be less than
* event queue(s) from the event port *port_id*.
*
* @param nb_unlinks
- * The number of unlinks to establish
+ * The number of unlinks to establish. This parameter is ignored if queues is
+ * NULL.
*
* @return
* The number of unlinks actually established. The return value can be less