*
* @see rte_event_port_setup(), rte_event_port_link()
*/
#define RTE_EVENT_PORT_CFG_HINT_PRODUCER (1ULL << 2)
/**< Hint that this event port will primarily enqueue events to the system.
 * A PMD can optimize its internal workings by assuming that this port is
 * primarily going to enqueue NEW events.
 *
 * Note that this flag is only a hint, so PMDs must operate under the
 * assumption that any port can enqueue an event with any type of op.
 *
 * @see rte_event_port_setup()
 */
#define RTE_EVENT_PORT_CFG_HINT_CONSUMER (1ULL << 3)
/**< Hint that this event port will primarily dequeue events from the system.
 * A PMD can optimize its internal workings by assuming that this port is
 * primarily going to consume events, and not enqueue FORWARD or RELEASE
 * events.
 *
 * Note that this flag is only a hint, so PMDs must operate under the
 * assumption that any port can enqueue an event with any type of op.
 *
 * @see rte_event_port_setup()
 */
#define RTE_EVENT_PORT_CFG_HINT_WORKER (1ULL << 4)
/**< Hint that this event port will primarily pass existing events through.
 * A PMD can optimize its internal workings by assuming that this port is
 * primarily going to FORWARD events, and not enqueue NEW or RELEASE events
 * often.
 *
 * Note that this flag is only a hint, so PMDs must operate under the
 * assumption that any port can enqueue an event with any type of op.
 *
 * @see rte_event_port_setup()
 */
/** Event port configuration structure */
struct rte_event_port_conf {
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4
/**< The application can override the adapter generated flow ID in the
 * event. This flow ID can be specified when adding an ethdev Rx queue
 * to the adapter using the ev.flow_id member.
 * @see struct rte_event_eth_rx_adapter_queue_conf::ev
 * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
 */
* - ENOMEM - no appropriate memory area found in which to create memzone
* - ENAMETOOLONG - mempool name requested is too long.
*/
-__rte_experimental
struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
unsigned int cache_size, uint16_t nb_elem,
const struct rte_event ev[], uint16_t nb_events,
const event_enqueue_burst_t fn)
{
- const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+ const struct rte_event_fp_ops *fp_ops;
+ void *port;
+ fp_ops = &rte_event_fp_ops[dev_id];
+ port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
- if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+ if (dev_id >= RTE_EVENT_MAX_DEVS ||
+ port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
rte_errno = EINVAL;
return 0;
}
- if (port_id >= dev->data->nb_ports) {
+ if (port == NULL) {
rte_errno = EINVAL;
return 0;
}
* requests nb_events as const one
*/
if (nb_events == 1)
- return (*dev->enqueue)(dev->data->ports[port_id], ev);
+ return (fp_ops->enqueue)(port, ev);
else
- return fn(dev->data->ports[port_id], ev, nb_events);
+ return fn(port, ev, nb_events);
}
/**
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
const struct rte_event ev[], uint16_t nb_events)
{
- const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+ const struct rte_event_fp_ops *fp_ops;
+ fp_ops = &rte_event_fp_ops[dev_id];
return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
- dev->enqueue_burst);
+ fp_ops->enqueue_burst);
}
/**
rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
const struct rte_event ev[], uint16_t nb_events)
{
- const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+ const struct rte_event_fp_ops *fp_ops;
+ fp_ops = &rte_event_fp_ops[dev_id];
return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
- dev->enqueue_new_burst);
+ fp_ops->enqueue_new_burst);
}
/**
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
const struct rte_event ev[], uint16_t nb_events)
{
- const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+ const struct rte_event_fp_ops *fp_ops;
+ fp_ops = &rte_event_fp_ops[dev_id];
return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
- dev->enqueue_forward_burst);
+ fp_ops->enqueue_forward_burst);
}
/**
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
uint16_t nb_events, uint64_t timeout_ticks)
{
- struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+ const struct rte_event_fp_ops *fp_ops;
+ void *port;
+ fp_ops = &rte_event_fp_ops[dev_id];
+ port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
- if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+ if (dev_id >= RTE_EVENT_MAX_DEVS ||
+ port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
rte_errno = EINVAL;
return 0;
}
- if (port_id >= dev->data->nb_ports) {
+ if (port == NULL) {
rte_errno = EINVAL;
return 0;
}
* requests nb_events as const one
*/
if (nb_events == 1)
- return (*dev->dequeue)(dev->data->ports[port_id], ev,
- timeout_ticks);
+ return (fp_ops->dequeue)(port, ev, timeout_ticks);
else
- return (*dev->dequeue_burst)(dev->data->ports[port_id], ev,
- nb_events, timeout_ticks);
+ return (fp_ops->dequeue_burst)(port, ev, nb_events,
+ timeout_ticks);
}
#ifdef __cplusplus