* ``uint64_t u64``
* ``void *event_ptr``
* ``struct rte_mbuf *mbuf``
+* ``struct rte_event_vector *vec``
-These three items in a union occupy the same 64 bits at the end of the rte_event
+These four items in a union occupy the same 64 bits at the end of the rte_event
structure. The application can utilize the 64 bits directly by accessing the
-u64 variable, while the event_ptr and mbuf are provided as convenience
+u64 variable, while the event_ptr, mbuf, and vec are provided as convenience
variables. For example, the mbuf pointer in the union can be used to schedule a
DPDK packet.
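+
+For example, a worker thread can pick the matching union member based on the
+event type when it dequeues an event. A minimal sketch, assuming ``dev_id``,
+``port_id`` and an application-defined ``process_mbuf()`` handler:
+
+.. code-block:: c
+
+   struct rte_event ev;
+
+   if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0) &&
+       ev.event_type == RTE_EVENT_TYPE_ETH_RX_ADAPTER) {
+           /* The mbuf member of the payload union carries the packet. */
+           process_mbuf(ev.mbuf);
+   }
+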
+Event Vector
+~~~~~~~~~~~~
+
+The ``rte_event_vector`` struct contains a vector of elements defined by the
+event type specified in the ``rte_event``. The ``rte_event_vector`` structure
+contains the following data:
+
+* ``nb_elem`` - The number of elements held within the vector.
+
+Similar to ``rte_event``, the payload of the event vector is also a union,
+allowing flexibility in what the actual vector contains; see the usage sketch
+after the list below.
+
+* ``struct rte_mbuf *mbufs[0]`` - An array of mbufs.
+* ``void *ptrs[0]`` - An array of pointers.
+* ``uint64_t u64s[0]`` - An array of uint64_t elements.
+
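+When a vector event is dequeued, the application selects the union member that
+matches the event type and walks the first ``nb_elem`` entries. A minimal
+sketch for a dequeued event ``ev`` carrying a vector of mbufs;
+``process_mbuf()`` is an assumed application-defined helper:
+
+.. code-block:: c
+
+   if (ev.event_type & RTE_EVENT_TYPE_VECTOR) {
+           struct rte_event_vector *vec = ev.vec;
+           uint16_t i;
+
+           /* Payload union interpreted as an array of mbufs here. */
+           for (i = 0; i < vec->nb_elem; i++)
+                   process_mbuf(vec->mbufs[i]);
+   }
+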
+The size of the event vector is related to the total number of elements it is
+configured to hold; this is achieved by making ``rte_event_vector`` a variable
+length structure.
+A helper function is provided to create a mempool that holds event vectors. It
+takes the name of the pool, the total number of ``rte_event_vector`` objects
+required, the cache size, the number of elements in each ``rte_event_vector``,
+and the socket id.
+
+.. code-block:: c
+
+ rte_event_vector_pool_create("vector_pool", nb_event_vectors, cache_sz,
+ nb_elements_per_vector, socket_id);
+
+The function ``rte_event_vector_pool_create`` creates a mempool with the best
+platform mempool ops.
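+
+As an illustration, an application stage could take a vector from such a pool,
+fill it with packets of the same flow, and enqueue it as a single event. A
+minimal sketch, assuming ``vec_pool``, ``dev_id``, ``port_id``, ``queue_id``
+and a batch of ``n`` mbufs already gathered by the application in ``mbufs``:
+
+.. code-block:: c
+
+   struct rte_event_vector *vec;
+   struct rte_event ev = {0};
+   uint16_t i;
+
+   /* vec_pool was created with rte_event_vector_pool_create(). */
+   if (rte_mempool_get(vec_pool, (void **)&vec) != 0)
+           return; /* no event vectors left in the pool */
+
+   /* Fill the variable length payload with the gathered mbufs. */
+   vec->nb_elem = n;
+   for (i = 0; i < n; i++)
+           vec->mbufs[i] = mbufs[i];
+
+   /* Enqueue the whole batch as a single CPU-generated vector event. */
+   ev.event_type = RTE_EVENT_TYPE_CPU_VECTOR;
+   ev.op = RTE_EVENT_OP_NEW;
+   ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+   ev.queue_id = queue_id;
+   ev.vec = vec;
+   rte_event_enqueue_burst(dev_id, port_id, &ev, 1);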
+
Queues
~~~~~~
* Added support for periodic timer mode in eventdev timer adapter.
* Added support for periodic timer mode in octeontx2 event device driver.
+* **Added event device vector capability.**
+
+  * Added the ``rte_event_vector`` data structure, which is capable of holding
+    multiple ``uintptr_t`` values of the same flow, thereby allowing
+    applications to vectorize their pipelines and reduce the complexity of
+    pipelining events across multiple stages.
+  * This also reduces the scheduling overhead on an event device.
+
* **Updated testpmd.**
* Added a command line option to configure forced speed for Ethernet port.
return -ENOTSUP;
}
+struct rte_mempool *
+rte_event_vector_pool_create(const char *name, unsigned int n,
+ unsigned int cache_size, uint16_t nb_elem,
+ int socket_id)
+{
+ const char *mp_ops_name;
+ struct rte_mempool *mp;
+ unsigned int elt_sz;
+ int ret;
+
+ if (!nb_elem) {
+ RTE_LOG(ERR, EVENTDEV,
+ "Invalid number of elements=%d requested\n", nb_elem);
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+	/* Vector header followed by nb_elem pointer-sized payload slots. */
+	elt_sz =
+		sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
+ mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
+ 0);
+ if (mp == NULL)
+ return NULL;
+
+	/* Pick the best mempool ops for the running platform. */
+	mp_ops_name = rte_mbuf_best_mempool_ops();
+ ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
+ if (ret != 0) {
+ RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
+ goto err;
+ }
+
+ ret = rte_mempool_populate_default(mp);
+ if (ret < 0)
+ goto err;
+
+ return mp;
+err:
+ rte_mempool_free(mp);
+ rte_errno = -ret;
+ return NULL;
+}
+
int
rte_event_dev_start(uint8_t dev_id)
{
#include <rte_common.h>
#include <rte_config.h>
-#include <rte_memory.h>
#include <rte_errno.h>
+#include <rte_mbuf_pool_ops.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
#include "rte_eventdev_trace_fp.h"
int
rte_event_dev_close(uint8_t dev_id);
+/**
+ * Event vector structure.
+ */
+struct rte_event_vector {
+ uint64_t nb_elem : 16;
+ /**< Number of elements in this event vector. */
+ uint64_t rsvd : 48;
+ /**< Reserved for future use */
+ uint64_t impl_opaque;
+ /**< Implementation specific opaque value.
+ * An implementation may use this field to hold implementation specific
+ * value to share between dequeue and enqueue operation.
+ * The application should not modify this field.
+ */
+ union {
+ struct rte_mbuf *mbufs[0];
+ void *ptrs[0];
+		uint64_t u64s[0];
+ } __rte_aligned(16);
+ /**< Start of the vector array union. Depending upon the event type the
+ * vector array can be an array of mbufs or pointers or opaque u64
+ * values.
+ */
+};
+
/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED 0
/**< Ordered scheduling
*/
#define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
/**< The event generated from event eth Rx adapter */
+#define RTE_EVENT_TYPE_VECTOR 0x8
+/**< Indicates that the event is a vector.
+ * All vector event types should be a logical OR of RTE_EVENT_TYPE_VECTOR and
+ * the base event type.
+ * This simplifies the pipeline design as one can split processing of events
+ * between vector events and normal events across event types.
+ * Example:
+ * if (ev.event_type & RTE_EVENT_TYPE_VECTOR) {
+ *         // Classify and handle vector event.
+ * } else {
+ *         // Classify and handle event.
+ * }
+ */
+#define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
+/**< The event vector generated from the CPU for pipelining. */
+
#define RTE_EVENT_TYPE_MAX 0x10
/**< Maximum number of event types */
/**< Opaque event pointer */
struct rte_mbuf *mbuf;
/**< mbuf pointer if dequeued event is associated with mbuf */
+ struct rte_event_vector *vec;
+ /**< Event vector pointer. */
};
};
*/
int rte_event_dev_selftest(uint8_t dev_id);
+/**
+ * Create a mempool of event vectors, where each event vector is capable of
+ * holding ``nb_elem`` elements.
+ *
+ * @param name
+ * The name of the vector pool.
+ * @param n
+ *   The number of event vectors in the pool.
+ * @param cache_size
+ * Size of the per-core object cache. See rte_mempool_create() for
+ * details.
+ * @param nb_elem
+ * The number of elements that a single event vector should be able to hold.
+ * @param socket_id
+ * The socket identifier where the memory should be allocated. The
+ * value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
+ * reserved zone
+ *
+ * @return
+ * The pointer to the newly allocated mempool, on success. NULL on error
+ * with rte_errno set appropriately. Possible rte_errno values include:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - EINVAL - cache size provided is too large, or nb_elem is 0.
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ * - ENAMETOOLONG - mempool name requested is too long.
+ */
+__rte_experimental
+struct rte_mempool *
+rte_event_vector_pool_create(const char *name, unsigned int n,
+ unsigned int cache_size, uint16_t nb_elem,
+ int socket_id);
+
#ifdef __cplusplus
}
#endif
__rte_eventdev_trace_port_setup;
# added in 20.11
rte_event_pmd_pci_probe_named;
+
+	# added in 21.05
+	rte_event_vector_pool_create;
};
INTERNAL {