diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
index 226f352ad2..a9c496fb62 100644
--- a/lib/librte_eventdev/rte_eventdev.h
+++ b/lib/librte_eventdev/rte_eventdev.h
@@ -212,8 +212,12 @@ extern "C" {
 #include <rte_common.h>
 #include <rte_config.h>
-#include <rte_memory.h>
 #include <rte_errno.h>
+#include <rte_mbuf_pool_ops.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+#include "rte_eventdev_trace_fp.h"
 
 struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
 struct rte_event;
 
@@ -289,6 +293,12 @@ struct rte_event;
  * single queue to each port or map a single queue to many ports.
  */
 
+#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
+/**< Event device preserves the flow ID from the enqueued
+ * event to the dequeued event if the flag is set. Otherwise,
+ * the content of this field is implementation dependent.
+ */
+
 /* Event device priority levels */
 #define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
 /**< Highest priority expressed across eventdev subsystem
@@ -378,6 +388,10 @@ struct rte_event_dev_info {
	 * event port by this device.
	 * A device that does not support bulk enqueue will set this as 1.
	 */
+	uint8_t max_event_port_links;
+	/**< Maximum number of queues that can be linked to a single event
+	 * port by this device.
+	 */
	int32_t max_num_events;
	/**< A *closed system* event dev has a limit on the number of events it
	 * can manage at a time. An *open system* event dev does not have a
@@ -385,6 +399,12 @@ struct rte_event_dev_info {
	 */
	uint32_t event_dev_cap;
	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+	uint8_t max_single_link_event_port_queue_pairs;
+	/**< Maximum number of event ports and queues that are optimized for
+	 * (and only capable of) single-link configurations supported by this
+	 * device. These ports and queues are not accounted for in
+	 * max_event_ports or max_event_queues.
+	 */
 };
 
 /**
@@ -492,6 +512,14 @@ struct rte_event_dev_config {
	 */
	uint32_t event_dev_cfg;
	/**< Event device config flags(RTE_EVENT_DEV_CFG_)*/
+	uint8_t nb_single_link_event_port_queues;
+	/**< Number of event ports and queues that will be singly-linked to
+	 * each other. These are a subset of the overall event ports and
+	 * queues; this value cannot exceed *nb_event_ports* or
+	 * *nb_event_queues*. If the device has ports and queues that are
+	 * optimized for single-link usage, this field is a hint for how many
+	 * to allocate; otherwise, regular event ports and queues can be used.
+	 */
 };
 
 /**
@@ -517,7 +545,6 @@
 int
 rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf);
 
-
 /* Event queue specific APIs */
 
 /* Event queue configuration bitmap flags */
@@ -669,6 +696,20 @@ rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
 
 /* Event port specific APIs */
 
+/* Event port configuration bitmap flags */
+#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)
+/**< Configure the port not to release outstanding events in
+ * rte_event_dev_dequeue_burst(). If set, all events received through
+ * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
+ * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
+ * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
+ */
+#define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)
+/**< This event port links only to a single event queue.
+ *
+ * @see rte_event_port_setup(), rte_event_port_link()
+ */
+
 /** Event port configuration structure */
 struct rte_event_port_conf {
	int32_t new_event_threshold;
@@ -696,13 +737,7 @@ struct rte_event_port_conf {
	 * which was previously supplied to rte_event_dev_configure().
	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 */
-	uint8_t disable_implicit_release;
-	/**< Configure the port not to release outstanding events in
-	 * rte_event_dev_dequeue_burst(). If true, all events received through
-	 * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
-	 * RTE_EVENT_OP_FORWARD. Must be false when the device is not
-	 * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
-	 */
+	uint32_t event_port_cfg; /**< Port cfg flags(EVENT_PORT_CFG_) */
 };
 
 /**
@@ -767,6 +802,10 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
  * The new event threshold of the port
  */
 #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
+/**
+ * The implicit release disable attribute of the port
+ */
+#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
 
 /**
  * Get an attribute from a port.
@@ -876,6 +915,51 @@ rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
 int
 rte_event_dev_close(uint8_t dev_id);
 
+/**
+ * Event vector structure.
+ */
+struct rte_event_vector {
+	uint16_t nb_elem;
+	/**< Number of elements in this event vector. */
+	uint16_t rsvd : 15;
+	/**< Reserved for future use */
+	uint16_t attr_valid : 1;
+	/**< Indicates that the below union attributes have valid information.
+	 */
+	union {
+		/* Used by the Rx/Tx adapter.
+		 * Indicates that all the elements in this vector belong to the
+		 * same port and queue pair when originating from the Rx
+		 * adapter; valid only when the event type is ETHDEV_VECTOR or
+		 * ETH_RX_ADAPTER_VECTOR.
+		 * Can also be used to indicate to the Tx adapter the
+		 * destination port and queue of the mbufs in the vector.
+		 */
+		struct {
+			uint16_t port;
+			/* Ethernet device port id. */
+			uint16_t queue;
+			/* Ethernet device queue id. */
+		};
+	};
+	/**< Union to hold common attributes of the vector array. */
+	uint64_t impl_opaque;
+	/**< Implementation-specific opaque value.
+	 * An implementation may use this field to hold an implementation-
+	 * specific value to share between the dequeue and enqueue operations.
+	 * The application should not modify this field.
+	 */
+	union {
+		struct rte_mbuf *mbufs[0];
+		void *ptrs[0];
+		uint64_t *u64s[0];
+	} __rte_aligned(16);
+	/**< Start of the vector array union. Depending upon the event type,
+	 * the vector array can be an array of mbufs, pointers, or opaque u64
+	 * values.
+	 */
+};
+
 /* Scheduler type definitions */
 #define RTE_SCHED_TYPE_ORDERED 0
 /**< Ordered scheduling
@@ -949,6 +1033,27 @@ rte_event_dev_close(uint8_t dev_id);
  */
 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
 /**< The event generated from event eth Rx adapter */
+#define RTE_EVENT_TYPE_VECTOR 0x8
+/**< Indicates that the event is a vector.
+ * All vector event types are a bitwise OR with RTE_EVENT_TYPE_VECTOR.
+ * This simplifies the pipeline design, as one can split the processing of
+ * vector events and normal events across event types.
+ * Example:
+ *	if (ev.event_type & RTE_EVENT_TYPE_VECTOR) {
+ *		// Classify and handle vector event.
+ *	} else {
+ *		// Classify and handle event.
+ *	}
+ */
+#define RTE_EVENT_TYPE_ETHDEV_VECTOR \
+	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
+/**< The event vector generated from ethdev subsystem */
+#define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
+/**< The event vector generated from CPU for pipelining. */
+#define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR \
+	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
+/**< The event vector generated from eth Rx adapter. */
+
 #define RTE_EVENT_TYPE_MAX 0x10
 /**< Maximum number of event types */
 
@@ -1071,6 +1176,8 @@ struct rte_event {
		/**< Opaque event pointer */
		struct rte_mbuf *mbuf;
		/**< mbuf pointer if dequeued event is associated with mbuf */
+		struct rte_event_vector *vec;
+		/**< Event vector pointer. */
	};
 };
 
@@ -1090,6 +1197,8 @@ struct rte_event {
  * @see struct rte_event_eth_rx_adapter_queue_conf::ev
  * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
  */
+#define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR 0x8
+/**< Adapter supports event vectorization per ethdev. */
 
 /**
  * Retrieve the event device's ethdev Rx adapter capabilities for the
@@ -1117,6 +1226,9 @@ rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
 #define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
 /**< This flag is set when the timer mechanism is in HW. */
 
+#define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC (1ULL << 1)
+/**< This flag is set if periodic mode is supported. */
+
 /**
  * Retrieve the event device's timer adapter capabilities.
  *
@@ -1186,6 +1298,10 @@ rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1
 /**< This flag is set when the PMD supports a packet transmit callback */
+#define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR 0x2
+/**< Indicates that the Tx adapter is capable of handling event vectors of
+ * mbufs.
+ */
 
 /**
  * Retrieve the event device's eth Tx adapter capabilities
@@ -1236,6 +1352,10 @@ typedef uint16_t (*event_tx_adapter_enqueue_same_dest)(void *port,
 * burst having same destination Ethernet port & Tx queue.
 */
 
+typedef uint16_t (*event_crypto_adapter_enqueue)(void *port,
+		struct rte_event ev[], uint16_t nb_events);
+/**< @internal Enqueue burst of events on crypto adapter */
+
 #define RTE_EVENTDEV_NAME_MAX_LEN (64)
 /**< @internal Max length of name of event PMD */
 
@@ -1318,8 +1438,11 @@ struct rte_eventdev {
	uint8_t attached : 1;
	/**< Flag indicating the device is attached */
 
+	event_crypto_adapter_enqueue ca_enqueue;
+	/**< Pointer to PMD crypto adapter enqueue function. */
+
	uint64_t reserved_64s[4]; /**< Reserved for future fields */
-	void *reserved_ptrs[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[3]; /**< Reserved for future fields */
 } __rte_cache_aligned;
 
 extern struct rte_eventdev *rte_eventdevs;
@@ -1343,6 +1466,7 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
		return 0;
	}
 #endif
+	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
	/*
	 * Allow zero cost non burst mode routine invocation if application
	 * requests nb_events as const one
@@ -1620,7 +1744,7 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
		return 0;
	}
 #endif
-
+	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
	/*
	 * Allow zero cost non burst mode routine invocation if application
	 * requests nb_events as const one
@@ -1985,6 +2109,42 @@ rte_event_dev_xstats_reset(uint8_t dev_id,
 */
 int rte_event_dev_selftest(uint8_t dev_id);
 
+/**
+ * Create a mempool that holds event vectors; the memory required per
+ * vector is derived from the number of elements per vector.
+ * This mempool should then supply the event vectors used by the application.
+ *
+ * @param name
+ *   The name of the vector pool.
+ * @param n
+ *   The number of elements (event vectors) in the pool.
+ * @param cache_size
+ *   Size of the per-core object cache. See rte_mempool_create() for
+ *   details.
+ * @param nb_elem
+ *   The number of elements that a single event vector should be able to hold.
+ * @param socket_id
+ *   The socket identifier where the memory should be allocated. The
+ *   value can be *SOCKET_ID_ANY* if there is no NUMA constraint for the
+ *   reserved zone.
+ *
+ * @return
+ *   The pointer to the newly allocated mempool, on success. NULL on error
+ *   with rte_errno set appropriately. Possible rte_errno values include:
+ *   - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *   - E_RTE_SECONDARY - function was called from a secondary process instance
+ *   - EINVAL - cache size provided is too large, or priv_size is not aligned.
+ *   - ENOSPC - the maximum number of memzones has already been allocated
+ *   - EEXIST - a memzone with the same name already exists
+ *   - ENOMEM - no appropriate memory area found in which to create memzone
+ *   - ENAMETOOLONG - mempool name requested is too long.
+ */
+__rte_experimental
+struct rte_mempool *
+rte_event_vector_pool_create(const char *name, unsigned int n,
+			     unsigned int cache_size, uint16_t nb_elem,
+			     int socket_id);
+
 #ifdef __cplusplus
 }
 #endif
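
For readers trying out the APIs introduced by this patch, below is a minimal usage sketch (not part of the patch itself): it creates a vector mempool with rte_event_vector_pool_create() and splits the dequeue fast path on the RTE_EVENT_TYPE_VECTOR bit, as the documentation of that define suggests. The pool name, pool sizing, and the two helper functions are illustrative assumptions; the types, flags, and calls are the ones declared in this header.

#include <stdio.h>

#include <rte_errno.h>
#include <rte_eventdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define BURST_SIZE 32

/* Pool holding the event vectors themselves; sizes are illustrative. */
static struct rte_mempool *vec_pool;

static int
setup_vector_pool(void)
{
	/* 8191 vectors, no per-core cache, up to 64 mbufs per vector. */
	vec_pool = rte_event_vector_pool_create("evt_vec_pool", 8191, 0,
						64, rte_socket_id());
	return vec_pool == NULL ? -rte_errno : 0;
}

/* Drain one burst from an event port, handling vector and scalar
 * events separately. */
static void
process_port(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event ev[BURST_SIZE];
	uint16_t nb, i, j;

	nb = rte_event_dequeue_burst(dev_id, port_id, ev, BURST_SIZE, 0);
	for (i = 0; i < nb; i++) {
		if (ev[i].event_type & RTE_EVENT_TYPE_VECTOR) {
			struct rte_event_vector *vec = ev[i].vec;

			/* attr_valid indicates port/queue are usable. */
			if (vec->attr_valid)
				printf("vector of %u mbufs from port %u queue %u\n",
				       vec->nb_elem, vec->port, vec->queue);
			for (j = 0; j < vec->nb_elem; j++)
				rte_pktmbuf_free(vec->mbufs[j]);
			/* Return the vector to the pool it came from; the
			 * sketch assumes the pool created above. */
			rte_mempool_put(vec_pool, vec);
		} else {
			rte_pktmbuf_free(ev[i].mbuf); /* scalar event */
		}
	}
}

An application that additionally sets RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL in rte_event_port_conf::event_port_cfg would also have to hand each dequeued event back with RTE_EVENT_OP_RELEASE or RTE_EVENT_OP_FORWARD, per the flag's description above.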