#include <rte_common.h>
#include <rte_config.h>
-#include <rte_memory.h>
#include <rte_errno.h>
+#include <rte_mbuf_pool_ops.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+#include "rte_eventdev_trace_fp.h"
struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
struct rte_event;
 * single queue to each port or map a single queue to many ports.
*/
+#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
+/**< Event device preserves the flow ID from the enqueued
+ * event to the dequeued event if the flag is set. Otherwise,
+ * the content of this field is implementation dependent.
+ */
+
/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
/**< Highest priority expressed across eventdev subsystem
* event port by this device.
* A device that does not support bulk enqueue will set this as 1.
*/
+ uint8_t max_event_port_links;
+ /**< Maximum number of queues that can be linked to a single event
+ * port by this device.
+ */
int32_t max_num_events;
/**< A *closed system* event dev has a limit on the number of events it
* can manage at a time. An *open system* event dev does not have a
*/
uint32_t event_dev_cap;
/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+ uint8_t max_single_link_event_port_queue_pairs;
+ /**< Maximum number of event ports and queues that are optimized for
+ * (and only capable of) single-link configurations supported by this
+ * device. These ports and queues are not accounted for in
+ * max_event_ports or max_event_queues.
+ */
};
/**
*/
uint32_t event_dev_cfg;
/**< Event device config flags(RTE_EVENT_DEV_CFG_)*/
+ uint8_t nb_single_link_event_port_queues;
+ /**< Number of event ports and queues that will be singly-linked to
+ * each other. These are a subset of the overall event ports and
+ * queues; this value cannot exceed *nb_event_ports* or
+ * *nb_event_queues*. If the device has ports and queues that are
+ * optimized for single-link usage, this field is a hint for how many
+ * to allocate; otherwise, regular event ports and queues can be used.
+ */
};
/**
rte_event_dev_configure(uint8_t dev_id,
const struct rte_event_dev_config *dev_conf);
-
/* Event queue specific APIs */
/* Event queue configuration bitmap flags */
/* Event port specific APIs */
+/* Event port configuration bitmap flags */
+#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL (1ULL << 0)
+/**< Configure the port not to release outstanding events in
+ * rte_event_dev_dequeue_burst(). If set, all events received through
+ * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
+ * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
+ * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
+ */
+#define RTE_EVENT_PORT_CFG_SINGLE_LINK (1ULL << 1)
+/**< This event port links only to a single event queue.
+ *
+ * @see rte_event_port_setup(), rte_event_port_link()
+ */
+
/** Event port configuration structure */
struct rte_event_port_conf {
int32_t new_event_threshold;
* which previously supplied to rte_event_dev_configure().
* Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
*/
- uint8_t disable_implicit_release;
- /**< Configure the port not to release outstanding events in
- * rte_event_dev_dequeue_burst(). If true, all events received through
- * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
- * RTE_EVENT_OP_FORWARD. Must be false when the device is not
- * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
- */
+	uint32_t event_port_cfg; /**< Port cfg flags(RTE_EVENT_PORT_CFG_) */
};
/**
* The new event threshold of the port
*/
#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
+/**
+ * The implicit release disable attribute of the port
+ */
+#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
/**
* Get an attribute from a port.
int
rte_event_dev_close(uint8_t dev_id);
+/**
+ * Event vector structure.
+ */
+struct rte_event_vector {
+	uint16_t nb_elem;
+	/**< Number of elements in this event vector. */
+	uint16_t rsvd : 15;
+	/**< Reserved for future use */
+	uint16_t attr_valid : 1;
+	/**< Indicates that the below union attributes have valid information.
+	 */
+	union {
+		/* Used by Rx/Tx adapter.
+		 * Indicates that all the elements in this vector belong to the
+		 * same port and queue pair when originating from Rx adapter,
+		 * valid only when event type is ETHDEV_VECTOR or
+		 * ETH_RX_ADAPTER_VECTOR.
+		 * Can also be used to indicate to the Tx adapter the
+		 * destination port and queue of the mbufs in the vector.
+		 */
+		struct {
+			uint16_t port;
+			/* Ethernet device port id. */
+			uint16_t queue;
+			/* Ethernet device queue id. */
+		};
+	};
+	/**< Union to hold common attributes of the vector array. */
+	uint64_t impl_opaque;
+	/**< Implementation specific opaque value.
+	 * An implementation may use this field to hold implementation specific
+	 * value to share between dequeue and enqueue operation.
+	 * The application should not modify this field.
+	 */
+	union {
+		struct rte_mbuf *mbufs[0];
+		void *ptrs[0];
+		uint64_t u64s[0];
+		/* u64s holds opaque 64-bit values directly (not pointers to
+		 * them), matching the "opaque u64 values" contract below.
+		 */
+	} __rte_aligned(16);
+	/**< Start of the vector array union. Depending upon the event type the
+	 * vector array can be an array of mbufs or pointers or opaque u64
+	 * values.
+	 */
+};
+
+
/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED 0
/**< Ordered scheduling
*/
#define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
/**< The event generated from event eth Rx adapter */
+#define RTE_EVENT_TYPE_VECTOR 0x8
+/**< Indicates that event is a vector.
+ * All vector event types should be a logical OR with RTE_EVENT_TYPE_VECTOR.
+ * This simplifies the pipeline design as one can split processing the events
+ * between vector events and normal event across event types.
+ * Example:
+ * if (ev.event_type & RTE_EVENT_TYPE_VECTOR) {
+ * // Classify and handle vector event.
+ * } else {
+ * // Classify and handle event.
+ * }
+ */
+#define RTE_EVENT_TYPE_ETHDEV_VECTOR \
+ (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
+/**< The event vector generated from ethdev subsystem */
+#define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
+/**< The event vector generated from cpu for pipelining. */
+#define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR \
+ (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
+/**< The event vector generated from eth Rx adapter. */
+
#define RTE_EVENT_TYPE_MAX 0x10
/**< Maximum number of event types */
/**< Opaque event pointer */
struct rte_mbuf *mbuf;
/**< mbuf pointer if dequeued event is associated with mbuf */
+ struct rte_event_vector *vec;
+ /**< Event vector pointer. */
};
};
* @see struct rte_event_eth_rx_adapter_queue_conf::ev
* @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
*/
+#define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR 0x8
+/**< Adapter supports event vectorization per ethdev. */
/**
* Retrieve the event device's ethdev Rx adapter capabilities for the
#define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
/**< This flag is set when the timer mechanism is in HW. */
+#define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC (1ULL << 1)
+/**< This flag is set if periodic mode is supported. */
+
/**
* Retrieve the event device's timer adapter capabilities.
*
#define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1
/**< This flag is set when the PMD supports a packet transmit callback
*/
+#define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR 0x2
+/**< Indicates that the Tx adapter is capable of handling event vector of
+ * mbufs.
+ */
/**
* Retrieve the event device's eth Tx adapter capabilities
* burst having same destination Ethernet port & Tx queue.
*/
+typedef uint16_t (*event_crypto_adapter_enqueue)(void *port,
+ struct rte_event ev[], uint16_t nb_events);
+/**< @internal Enqueue burst of events on crypto adapter */
+
#define RTE_EVENTDEV_NAME_MAX_LEN (64)
/**< @internal Max length of name of event PMD */
char name[RTE_EVENTDEV_NAME_MAX_LEN];
/**< Unique identifier name */
+
+ uint64_t reserved_64s[4]; /**< Reserved for future fields */
+ void *reserved_ptrs[4]; /**< Reserved for future fields */
} __rte_cache_aligned;
/** @internal The data structure associated with each event device. */
RTE_STD_C11
uint8_t attached : 1;
/**< Flag indicating the device is attached */
+
+ event_crypto_adapter_enqueue ca_enqueue;
+ /**< Pointer to PMD crypto adapter enqueue function. */
+
+ uint64_t reserved_64s[4]; /**< Reserved for future fields */
+ void *reserved_ptrs[3]; /**< Reserved for future fields */
} __rte_cache_aligned;
extern struct rte_eventdev *rte_eventdevs;
return 0;
}
#endif
+ rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
/*
* Allow zero cost non burst mode routine invocation if application
* requests nb_events as const one
return 0;
}
#endif
-
+ rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
/*
* Allow zero cost non burst mode routine invocation if application
* requests nb_events as const one
*/
int rte_event_dev_selftest(uint8_t dev_id);
+/**
+ * Get the memory required per event vector based on the number of elements per
+ * vector.
+ * This should be used to create the mempool that holds the event vectors.
+ *
+ * @param name
+ * The name of the vector pool.
+ * @param n
+ *   The number of event vectors to be created in the pool.
+ * @param cache_size
+ * Size of the per-core object cache. See rte_mempool_create() for
+ * details.
+ * @param nb_elem
+ * The number of elements that a single event vector should be able to hold.
+ * @param socket_id
+ * The socket identifier where the memory should be allocated. The
+ * value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
+ * reserved zone
+ *
+ * @return
+ * The pointer to the newly allocated mempool, on success. NULL on error
+ * with rte_errno set appropriately. Possible rte_errno values include:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - EINVAL - cache size provided is too large, or priv_size is not aligned.
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ * - ENAMETOOLONG - mempool name requested is too long.
+ */
+__rte_experimental
+struct rte_mempool *
+rte_event_vector_pool_create(const char *name, unsigned int n,
+ unsigned int cache_size, uint16_t nb_elem,
+ int socket_id);
+
#ifdef __cplusplus
}
#endif