diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
index 707398782b..b21c27179e 100644
--- a/lib/librte_eventdev/rte_eventdev.h
+++ b/lib/librte_eventdev/rte_eventdev.h
@@ -1,7 +1,7 @@
 /*
  *   BSD LICENSE
  *
- *   Copyright 2016 Cavium.
+ *   Copyright 2016 Cavium, Inc.
  *   Copyright 2016 Intel Corporation.
  *   Copyright 2016 NXP.
  *
@@ -15,7 +15,7 @@
  *       notice, this list of conditions and the following disclaimer in
  *       the documentation and/or other materials provided with the
  *       distribution.
- *     * Neither the name of Cavium nor the names of its
+ *     * Neither the name of Cavium, Inc nor the names of its
  *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
  *
@@ -199,20 +199,6 @@
  * operation. Instead, Event drivers export Poll-Mode enqueue and dequeue
  * functions to applications.
  *
- * An event driven based application has following typical workflow on fastpath:
- * \code{.c}
- *	while (1) {
- *
- *		rte_event_schedule(dev_id);
- *
- *		rte_event_dequeue(...);
- *
- *		(event processing)
- *
- *		rte_event_enqueue(...);
- *	}
- * \endcode
- *
  * The events are injected to event device through *enqueue* operation by
  * event producers in the system. The typical event producers are ethdev
  * subsystem for generating packet events, CPU(SW) for generating events based
@@ -232,10 +218,19 @@
  * (each worker thread schedules events to its own port) or centralized
  * (a dedicated thread schedules to all ports). Distributed software schedulers
  * perform the scheduling in rte_event_dequeue_burst(), whereas centralized
- * scheduler logic is located in rte_event_schedule().
+ * scheduler logic needs a dedicated service core for scheduling.
  * The RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag is not set
  * indicates the device is centralized and thus needs a dedicated scheduling
- * thread that repeatedly calls rte_event_schedule().
+ * thread that repeatedly calls the software-specific scheduling function.
+ *
+ * An event-driven worker thread has the following typical workflow on fastpath:
+ * \code{.c}
+ *	while (1) {
+ *		rte_event_dequeue_burst(...);
+ *		(event processing)
+ *		rte_event_enqueue_burst(...);
+ *	}
+ * \endcode
  *
  */
@@ -244,7 +239,9 @@ extern "C" {
 #endif
 
 #include <rte_common.h>
+#include <rte_config.h>
 #include <rte_memory.h>
+#include <rte_errno.h>
 
 struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
 
@@ -267,9 +264,56 @@ struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
  * In distributed scheduling mode, event scheduling happens in HW or
  * rte_event_dequeue_burst() or the combination of these two.
  * If the flag is not set then eventdev is centralized and thus needs a
- * dedicated scheduling thread that repeatedly calls rte_event_schedule().
+ * dedicated service core that acts as a scheduling thread.
  *
- * @see rte_event_schedule(), rte_event_dequeue_burst()
+ * @see rte_event_dequeue_burst()
+ */
+#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES     (1ULL << 3)
+/**< Event device is capable of enqueuing events of any type to any queue.
+ * If this capability is not set, the queue only supports events of the
+ * *RTE_SCHED_TYPE_* type that it was created with.
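+ *
+ * For example, an application may test this capability bit before asking a
+ * queue to accept all schedule types (illustrative sketch; dev_id and
+ * queue_conf are assumed to be set up by the caller):
+ * \code{.c}
+ *	struct rte_event_dev_info info;
+ *	rte_event_dev_info_get(dev_id, &info);
+ *	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
+ *		queue_conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+ * \endcode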
+ *
+ * @see RTE_SCHED_TYPE_* values
+ */
+#define RTE_EVENT_DEV_CAP_BURST_MODE          (1ULL << 4)
+/**< Event device is capable of operating in burst mode for enqueue(forward,
+ * release) and dequeue operation. If this capability is not set, the
+ * application still uses rte_event_dequeue_burst() and
+ * rte_event_enqueue_burst(), but the PMD accepts only one event at a time.
+ *
+ * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
+ */
+#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE    (1ULL << 5)
+/**< Event device ports support disabling the implicit release feature, in
+ * which the port will release all unreleased events in its dequeue operation.
+ * If this capability is set and the port is configured with implicit release
+ * disabled, the application is responsible for explicitly releasing events
+ * using either the RTE_EVENT_OP_FORWARD or the RTE_EVENT_OP_RELEASE event
+ * enqueue operations.
+ *
+ * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
+ */
+
+#define RTE_EVENT_DEV_CAP_NONSEQ_MODE         (1ULL << 6)
+/**< Event device is capable of operating in non-sequential mode. The path
+ * of the event need not be sequential, and the application can change the
+ * path of an event at runtime. If the flag is not set, then each event will
+ * follow a path from queue 0 to queue 1 to queue 2, etc. If the flag is
+ * set, events may be sent to queues in any order. If the flag is not set, the
+ * eventdev will return an error when the application enqueues an event for a
+ * qid which is not the next in the sequence.
+ */
+
+#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK   (1ULL << 7)
+/**< Event device is capable of configuring the queue/port link at runtime.
+ * If the flag is not set, the eventdev queue/port link can only be
+ * configured during initialization.
+ */
+
+#define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
+/**< Event device is capable of setting up a link between multiple queues
+ * and a single port. If the flag is not set, the eventdev can only map a
+ * single queue to each port, or map a single queue to many ports.
  */
 
 /* Event device priority levels */
@@ -330,7 +374,7 @@ rte_event_dev_socket_id(uint8_t dev_id);
  */
 struct rte_event_dev_info {
 	const char *driver_name;	/**< Event driver name */
-	struct rte_pci_device *pci_dev;	/**< PCI information */
+	struct rte_device *dev;	/**< Device information */
 	uint32_t min_dequeue_timeout_ns;
 	/**< Minimum supported global dequeue timeout(ns) by this device */
 	uint32_t max_dequeue_timeout_ns;
@@ -388,6 +432,36 @@ struct rte_event_dev_info {
 int
 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
 
+/**
+ * The count of ports.
+ */
+#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
+/**
+ * The count of queues.
+ */
+#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
+/**
+ * The status of the device, zero for stopped, non-zero for started.
+ */
+#define RTE_EVENT_DEV_ATTR_STARTED 2
+
+/**
+ * Get an attribute from a device.
+ *
+ * @param dev_id Eventdev id
+ * @param attr_id The attribute ID to retrieve
+ * @param[out] attr_value A pointer that will be filled in with the attribute
+ *             value if successful.
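+ *
+ * For example, the number of configured ports can be read back as below
+ * (illustrative sketch; dev_id is assumed valid):
+ * \code{.c}
+ *	uint32_t nb_ports;
+ *	rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ *			&nb_ports);
+ * \endcode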
+ *
+ * @return
+ *   - 0: Successfully retrieved attribute value
+ *   - -EINVAL: Invalid device or *attr_id* provided, or *attr_value* is NULL
+ */
+int
+rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
+		uint32_t *attr_value);
+
+
 /* Event device configuration bitmap flags */
 #define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
 /**< Override the global *dequeue_timeout_ns* and use per dequeue timeout in ns.
@@ -401,6 +475,7 @@ struct rte_event_dev_config {
	 * This value should be in the range of *min_dequeue_timeout_ns* and
	 * *max_dequeue_timeout_ns* which previously provided in
	 * rte_event_dev_info_get()
+	 * The value 0 is allowed, in which case the default dequeue timeout is used.
	 * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
	 */
	int32_t nb_events_limit;
@@ -426,18 +501,20 @@ struct rte_event_dev_config {
	 * This value cannot exceed the *max_event_queue_flows* which previously
	 * provided in rte_event_dev_info_get()
	 */
-	uint8_t nb_event_port_dequeue_depth;
+	uint32_t nb_event_port_dequeue_depth;
	/**< Maximum number of events can be dequeued at a time from an
	 * event port by this device.
	 * This value cannot exceed the *max_event_port_dequeue_depth*
-	 * which previously provided in rte_event_dev_info_get()
+	 * which was previously provided in rte_event_dev_info_get().
+	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 * @see rte_event_port_setup()
	 */
	uint32_t nb_event_port_enqueue_depth;
	/**< Maximum number of events can be enqueued at a time from an
	 * event port by this device.
	 * This value cannot exceed the *max_event_port_enqueue_depth*
-	 * which previously provided in rte_event_dev_info_get()
+	 * which was previously provided in rte_event_dev_info_get().
+	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 * @see rte_event_port_setup()
	 */
	uint32_t event_dev_cfg;
@@ -471,45 +548,13 @@ rte_event_dev_configure(uint8_t dev_id,
 
 /* Event queue specific APIs */
 
 /* Event queue configuration bitmap flags */
-#define RTE_EVENT_QUEUE_CFG_DEFAULT            (0)
-/**< Default value of *event_queue_cfg* when rte_event_queue_setup() invoked
- * with queue_conf == NULL
- *
- * @see rte_event_queue_setup()
- */
-#define RTE_EVENT_QUEUE_CFG_TYPE_MASK          (3ULL << 0)
-/**< Mask for event queue schedule type configuration request */
-#define RTE_EVENT_QUEUE_CFG_ALL_TYPES          (0ULL << 0)
+#define RTE_EVENT_QUEUE_CFG_ALL_TYPES          (1ULL << 0)
 /**< Allow ATOMIC,ORDERED,PARALLEL schedule type enqueue
  *
  * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
  * @see rte_event_enqueue_burst()
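+ *
+ * A queue accepting all schedule types might be set up as below
+ * (illustrative sketch; dev_id and queue_id are assumed valid and the
+ * device is assumed to have the RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES
+ * capability):
+ * \code{.c}
+ *	struct rte_event_queue_conf conf;
+ *	rte_event_queue_default_conf_get(dev_id, queue_id, &conf);
+ *	conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+ *	rte_event_queue_setup(dev_id, queue_id, &conf);
+ * \endcode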
  */
-#define RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY        (1ULL << 0)
-/**< Allow only ATOMIC schedule type enqueue
- *
- * The rte_event_enqueue_burst() result is undefined if the queue configured
- * with ATOMIC only and sched_type != RTE_SCHED_TYPE_ATOMIC
- *
- * @see RTE_SCHED_TYPE_ATOMIC, rte_event_enqueue_burst()
- */
-#define RTE_EVENT_QUEUE_CFG_ORDERED_ONLY       (2ULL << 0)
-/**< Allow only ORDERED schedule type enqueue
- *
- * The rte_event_enqueue_burst() result is undefined if the queue configured
- * with ORDERED only and sched_type != RTE_SCHED_TYPE_ORDERED
- *
- * @see RTE_SCHED_TYPE_ORDERED, rte_event_enqueue_burst()
- */
-#define RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY      (3ULL << 0)
-/**< Allow only PARALLEL schedule type enqueue
- *
- * The rte_event_enqueue_burst() result is undefined if the queue configured
- * with PARALLEL only and sched_type != RTE_SCHED_TYPE_PARALLEL
- *
- * @see RTE_SCHED_TYPE_PARALLEL, rte_event_enqueue_burst()
- */
-#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK        (1ULL << 2)
+#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK        (1ULL << 1)
 /**< This event queue links only to a single event port.
  *
  * @see rte_event_port_setup(), rte_event_port_link()
@@ -519,9 +564,11 @@ rte_event_dev_configure(uint8_t dev_id,
 struct rte_event_queue_conf {
	uint32_t nb_atomic_flows;
	/**< The maximum number of active flows this queue can track at any
-	 * given time. The value must be in the range of
-	 * [1 - nb_event_queue_flows)] which previously provided in
-	 * rte_event_dev_info_get().
+	 * given time. If the queue is configured for atomic scheduling (by
+	 * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg
+	 * or the RTE_SCHED_TYPE_ATOMIC flag to schedule_type), then the
+	 * value must be in the range of [1, nb_event_queue_flows], which was
+	 * previously provided in rte_event_dev_configure().
	 */
	uint32_t nb_atomic_order_sequences;
	/**< The maximum number of outstanding events waiting to be
@@ -531,10 +578,19 @@ struct rte_event_queue_conf {
	 * scheduler cannot schedule the events from this queue and invalid
	 * event will be returned from dequeue until one or more entries are
	 * freed up/released.
-	 * The value must be in the range of [1 - nb_event_queue_flows)]
-	 * which previously supplied to rte_event_dev_configure().
+	 * If the queue is configured for ordered scheduling (by applying the
+	 * RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg or the
+	 * RTE_SCHED_TYPE_ORDERED flag to schedule_type), then the value must
+	 * be in the range of [1, nb_event_queue_flows], which was
+	 * previously supplied to rte_event_dev_configure().
+	 */
+	uint32_t event_queue_cfg;
+	/**< Queue cfg flags(EVENT_QUEUE_CFG_) */
+	uint8_t schedule_type;
+	/**< Queue schedule type(RTE_SCHED_TYPE_*).
+	 * Valid when RTE_EVENT_QUEUE_CFG_ALL_TYPES bit is not set in
+	 * event_queue_cfg.
	 */
-	uint32_t event_queue_cfg; /**< Queue cfg flags(EVENT_QUEUE_CFG_) */
	uint8_t priority;
	/**< Priority for this event queue relative to other event queues.
	 * The requested priority should in the range of
@@ -594,31 +650,49 @@ rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf);
 
 /**
- * Get the number of event queues on a specific event device
- *
- * @param dev_id
- *   Event device identifier.
- * @return
- *   - The number of configured event queues
+ * The priority of the queue.
  */
-uint8_t
-rte_event_queue_count(uint8_t dev_id);
+#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
+/**
+ * The number of atomic flows configured for the queue.
+ */
+#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
+/**
+ * The number of atomic order sequences configured for the queue.
+ */
+#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
+/**
+ * The cfg flags for the queue.
+ */
+#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
+/**
+ * The schedule type of the queue.
+ */
+#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
 
 /**
- * Get the priority of the event queue on a specific event device
+ * Get an attribute from a queue.
  *
  * @param dev_id
- *   Event device identifier.
+ *   Eventdev id
  * @param queue_id
- *   Event queue identifier.
+ *   Eventdev queue id
+ * @param attr_id
+ *   The attribute ID to retrieve
+ * @param[out] attr_value
+ *   A pointer that will be filled in with the attribute value if successful
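+ *
+ * For example, the schedule type of a queue can be read back as below
+ * (illustrative sketch; dev_id and queue_id are assumed valid):
+ * \code{.c}
+ *	uint32_t sched_type;
+ *	int ret = rte_event_queue_attr_get(dev_id, queue_id,
+ *			RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE, &sched_type);
+ * \endcode
+ * A -EOVERFLOW return here means the queue was created with
+ * RTE_EVENT_QUEUE_CFG_ALL_TYPES, so no single schedule type applies.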
+ *
  * @return
- *   - If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then the
- *     configured priority of the event queue in
- *     [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST] range
- *     else the value RTE_EVENT_DEV_PRIORITY_NORMAL
+ *   - 0: Successfully returned value
+ *   - -EINVAL: invalid device, queue or attr_id provided, or attr_value was
+ *	NULL
+ *   - -EOVERFLOW: returned when attr_id is set to
+ *	RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE and event_queue_cfg is set to
+ *	RTE_EVENT_QUEUE_CFG_ALL_TYPES
  */
-uint8_t
-rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id);
+int
+rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
+			uint32_t *attr_value);
 
 /* Event port specific APIs */
 
@@ -637,15 +711,24 @@ struct rte_event_port_conf {
	 * which was previously supplied to rte_event_dev_configure().
	 * This should be set to '-1' for *open system*.
	 */
-	uint8_t dequeue_depth;
+	uint16_t dequeue_depth;
	/**< Configure number of bulk dequeues for this event port.
	 * This value cannot exceed the *nb_event_port_dequeue_depth*
-	 * which previously supplied to rte_event_dev_configure()
+	 * which was previously supplied to rte_event_dev_configure().
+	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 */
-	uint8_t enqueue_depth;
+	uint16_t enqueue_depth;
	/**< Configure number of bulk enqueues for this event port.
	 * This value cannot exceed the *nb_event_port_enqueue_depth*
-	 * which previously supplied to rte_event_dev_configure()
+	 * which was previously supplied to rte_event_dev_configure().
+	 * Ignored when the device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
+	 */
+	uint8_t disable_implicit_release;
+	/**< Configure the port not to release outstanding events in
+	 * rte_event_dequeue_burst(). If true, all events received through
+	 * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
+	 * RTE_EVENT_OP_FORWARD. Must be false when the device is not
+	 * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
	 */
 };
 
@@ -700,47 +783,37 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf);
 
 /**
- * Get the number of dequeue queue depth configured for event port designated
- * by its *port_id* on a specific event device
- *
- * @param dev_id
- *   Event device identifier.
- * @param port_id
- *   Event port identifier.
- * @return
- *   - The number of configured dequeue queue depth
- *
- * @see rte_event_dequeue_burst()
+ * The queue depth of the port on the enqueue side
  */
-uint8_t
-rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id);
-
+#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
 /**
- * Get the number of enqueue queue depth configured for event port designated
- * by its *port_id* on a specific event device
- *
- * @param dev_id
- *   Event device identifier.
- * @param port_id
- *   Event port identifier.
- * @return
- *   - The number of configured enqueue queue depth
- *
- * @see rte_event_enqueue_burst()
+ * The queue depth of the port on the dequeue side
  */
-uint8_t
-rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id);
+#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
+/**
+ * The new event threshold of the port
+ */
+#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
 
 /**
- * Get the number of ports on a specific event device
+ * Get an attribute from a port.
  *
  * @param dev_id
- *   Event device identifier.
+ *   Eventdev id
+ * @param port_id
+ *   Eventdev port id
+ * @param attr_id
+ *   The attribute ID to retrieve
+ * @param[out] attr_value
+ *   A pointer that will be filled in with the attribute value if successful
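+ *
+ * For example, the configured new event threshold can be read back as below
+ * (illustrative sketch; dev_id and port_id are assumed valid):
+ * \code{.c}
+ *	uint32_t threshold;
+ *	rte_event_port_attr_get(dev_id, port_id,
+ *			RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD, &threshold);
+ * \endcode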
+ *
  * @return
- *   - The number of configured ports
+ *   - 0: Successfully returned value
+ *   - (-EINVAL) Invalid device, port or attr_id, or attr_value was NULL
  */
-uint8_t
-rte_event_port_count(uint8_t dev_id);
+int
+rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
+			uint32_t *attr_value);
 
 /**
  * Start an event device.
@@ -755,7 +828,8 @@ rte_event_port_count(uint8_t dev_id);
  *   Event device identifier
  * @return
  *   - 0: Success, device started.
- *   - <0: Error code of the driver device start function.
+ *   - -ESTALE : Not all ports of the device are configured
+ *   - -ENOLINK: Not all queues are linked, which could lead to deadlock.
  */
 int
 rte_event_dev_start(uint8_t dev_id);
@@ -855,6 +929,8 @@ rte_event_dev_close(uint8_t dev_id);
 /**< The event generated from cpu for pipelining.
  * Application may use *sub_event_type* to further classify the event
  */
+#define RTE_EVENT_TYPE_ETH_RX_ADAPTER   0x4
+/**< The event generated from event eth Rx adapter */
 #define RTE_EVENT_TYPE_MAX              0x10
 /**< Maximum number of event types */
 
@@ -866,7 +942,10 @@ rte_event_dev_close(uint8_t dev_id);
 #define RTE_EVENT_OP_FORWARD            1
 /**< The CPU use this operation to forward the event to different event queue or
  * change to new application specific flow or schedule type to enable
- * pipelining
+ * pipelining.
+ *
+ * This operation must only be enqueued to the same port that the
+ * event to be forwarded was dequeued from.
  */
 #define RTE_EVENT_OP_RELEASE            2
 /**< Release the flow context associated with the schedule type.
@@ -896,15 +975,18 @@ rte_event_dev_close(uint8_t dev_id);
  * or no scheduling context is held then this function may be an NOOP,
  * depending on the implementation.
  *
+ * This operation must only be enqueued to the same port that the
+ * event to be released was dequeued from.
+ *
  */
 
 /**
  * The generic *rte_event* structure to hold the event attributes
  * for dequeue and enqueue operation
  */
+RTE_STD_C11
 struct rte_event {
	/** WORD0 */
-	RTE_STD_C11
	union {
		uint64_t event;
		/** Event attributes for dequeue or enqueue operation */
@@ -964,7 +1046,6 @@ struct rte_event {
		};
	};
	/** WORD1 */
-	RTE_STD_C11
	union {
		uint64_t u64;
		/**< Opaque 64-bit value */
@@ -975,14 +1056,50 @@ struct rte_event {
	};
 };
 
+/* Ethdev Rx adapter capability bitmap flags */
+#define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT	0x1
+/**< This flag is set when the packet transfer mechanism is in HW.
+ * Ethdev can send packets to the event device using internal event port.
+ */
+#define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ	0x2
+/**< Adapter supports multiple event queues per ethdev. Every ethdev
+ * Rx queue can be connected to a unique event queue.
+ */
+#define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID	0x4
+/**< The application can override the adapter generated flow ID in the
+ * event. This flow ID can be specified when adding an ethdev Rx queue
+ * to the adapter using the ev member of struct rte_event_eth_rx_adapter
+ * @see struct rte_event_eth_rx_adapter_queue_conf::ev
+ * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
+ */
+
+/**
+ * Retrieve the event device's ethdev Rx adapter capabilities for the
+ * specified ethernet port
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @param eth_port_id
+ *   The identifier of the ethernet device.
+ *
+ * @param[out] caps
+ *   A pointer to memory filled with Rx event adapter capabilities.
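+ *
+ * For example (illustrative sketch; dev_id and eth_port_id are assumed
+ * valid, and configure_sw_rx_service() is a hypothetical application
+ * helper):
+ * \code{.c}
+ *	uint32_t caps = 0;
+ *	rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps);
+ *	if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
+ *		configure_sw_rx_service();
+ * \endcode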
+ *
+ * @return
+ *   - 0: Success, driver provides Rx event adapter capabilities for the
+ *	ethernet device.
+ *   - <0: Error code returned by the driver function.
+ *
+ */
+int
+rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
+				uint32_t *caps);
 
 struct rte_eventdev_driver;
 struct rte_eventdev_ops;
 struct rte_eventdev;
 
-typedef void (*event_schedule_t)(struct rte_eventdev *dev);
-/**< @internal Schedule one or more events in the event dev. */
-
 typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
 /**< @internal Enqueue event on port of a device */
 
@@ -1019,12 +1136,10 @@ struct rte_eventdev_data {
	/**< Number of event ports. */
	void **ports;
	/**< Array of pointers to ports. */
-	uint8_t *ports_dequeue_depth;
-	/**< Array of port dequeue depth. */
-	uint8_t *ports_enqueue_depth;
-	/**< Array of port enqueue depth. */
-	uint8_t *queues_prio;
-	/**< Array of queue priority. */
+	struct rte_event_port_conf *ports_cfg;
+	/**< Array of port configuration structures. */
+	struct rte_event_queue_conf *queues_cfg;
+	/**< Array of queue configuration structures. */
	uint16_t *links_map;
	/**< Memory to store queues to port connections. */
	void *dev_private;
@@ -1033,6 +1148,10 @@ struct rte_eventdev_data {
	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
	struct rte_event_dev_config dev_conf;
	/**< Configuration applied to device. */
+	uint8_t service_inited;
+	/* Service initialization state */
+	uint32_t service_id;
+	/* Service ID*/
 
	RTE_STD_C11
	uint8_t dev_started : 1;
@@ -1044,12 +1163,14 @@ struct rte_eventdev_data {
 
 /** @internal The data structure associated with each event device. */
 struct rte_eventdev {
-	event_schedule_t schedule;
-	/**< Pointer to PMD schedule function. */
	event_enqueue_t enqueue;
	/**< Pointer to PMD enqueue function. */
	event_enqueue_burst_t enqueue_burst;
	/**< Pointer to PMD enqueue burst function. */
+	event_enqueue_burst_t enqueue_new_burst;
+	/**< Pointer to PMD enqueue burst function(op new variant) */
+	event_enqueue_burst_t enqueue_forward_burst;
+	/**< Pointer to PMD enqueue burst function(op forward variant) */
	event_dequeue_t dequeue;
	/**< Pointer to PMD dequeue function. */
	event_dequeue_burst_t dequeue_burst;
@@ -1059,10 +1180,8 @@ struct rte_eventdev {
	/**< Pointer to device data */
	const struct rte_eventdev_ops *dev_ops;
	/**< Functions exported by PMD */
-	struct rte_pci_device *pci_dev;
-	/**< PCI info. supplied by probing */
-	const struct rte_eventdev_driver *driver;
-	/**< Driver for this device */
+	struct rte_device *dev;
+	/**< Device info. supplied by probing */
 
	RTE_STD_C11
	uint8_t attached : 1;
@@ -1072,22 +1191,32 @@ struct rte_eventdev {
 extern struct rte_eventdev *rte_eventdevs;
 /** @internal The pool of rte_eventdev structures. */
 
-
-/**
- * Schedule one or more events in the event dev.
- *
- * An event dev implementation may define this is a NOOP, for instance if
- * the event dev performs its scheduling in hardware.
- *
- * @param dev_id
- *   The identifier of the device.
- */
-static inline void
-rte_event_schedule(uint8_t dev_id)
+static __rte_always_inline uint16_t
+__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+			const struct rte_event ev[], uint16_t nb_events,
+			const event_enqueue_burst_t fn)
 {
-	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
-	if (*dev->schedule)
-		(*dev->schedule)(dev);
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	if (port_id >= dev->data->nb_ports) {
+		rte_errno = -EINVAL;
+		return 0;
+	}
+#endif
+	/*
+	 * Allow zero cost non burst mode routine invocation if application
+	 * requests nb_events as const one
+	 */
+	if (nb_events == 1)
+		return (*dev->enqueue)(dev->data->ports[port_id], ev);
+	else
+		return fn(dev->data->ports[port_id], ev, nb_events);
 }
 
 /**
@@ -1099,6 +1228,9 @@ rte_event_schedule(uint8_t dev_id)
  * The *nb_events* parameter is the number of event objects to enqueue which are
  * supplied in the *ev* array of *rte_event* structure.
  *
+ * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
+ * enqueued to the same port that their associated events were dequeued from.
+ *
  * The rte_event_enqueue_burst() function returns the number of
  * events objects it actually enqueued. A return value equal to *nb_events*
  * means that all event objects have been enqueued.
@@ -1118,27 +1250,123 @@ rte_event_schedule(uint8_t dev_id)
  *   The number of event objects actually enqueued on the event device. The
  *   return value can be less than the value of the *nb_events* parameter when
  *   the event devices queue is full or if invalid parameters are specified in a
- *   *rte_event*. If return value is less than *nb_events*, the remaining events
- *   at the end of ev[] are not consumed,and the caller has to take care of them
- *
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - -EINVAL  The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - -ENOSPC  The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
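+ *
+ * A typical enqueue with backpressure handling might look as below
+ * (illustrative sketch; ev[] and nb_ev are prepared by the caller and
+ * handle_backpressure() is a hypothetical application helper):
+ * \code{.c}
+ *	uint16_t sent = rte_event_enqueue_burst(dev_id, port_id, ev, nb_ev);
+ *	if (sent < nb_ev && rte_errno == -ENOSPC)
+ *		handle_backpressure(&ev[sent], nb_ev - sent);
+ * \endcode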
  * @see rte_event_port_enqueue_depth()
  */
 static inline uint16_t
 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
			const struct rte_event ev[], uint16_t nb_events)
 {
-	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
 
-	/*
-	 * Allow zero cost non burst mode routine invocation if application
-	 * requests nb_events as const one
-	 */
-	if (nb_events == 1)
-		return (*dev->enqueue)(
-			dev->data->ports[port_id], ev);
-	else
-		return (*dev->enqueue_burst)(
-			dev->data->ports[port_id], ev, nb_events);
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+			dev->enqueue_burst);
+}
+
+/**
+ * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on
+ * an event device designated by its *dev_id* through the event port specified
+ * by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), except that
+ * the application can use this API when all objects in the burst contain
+ * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized
+ * function can provide the additional hint to the PMD and optimize if
+ * possible.
+ *
+ * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
+ * has event object of operation type != RTE_EVENT_OP_NEW.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically number of
+ *   rte_event_port_enqueue_depth() available for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event devices queue is full or if invalid parameters are specified in a
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - -EINVAL  The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - -ENOSPC  The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
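+ *
+ * For example, a producer thread injecting fresh events might do
+ * (illustrative sketch; prepare_event() is a hypothetical application
+ * helper and dev_id/port_id are assumed valid):
+ * \code{.c}
+ *	uint16_t i, sent;
+ *	for (i = 0; i < nb_ev; i++) {
+ *		prepare_event(&ev[i]);
+ *		ev[i].op = RTE_EVENT_OP_NEW;
+ *	}
+ *	sent = rte_event_enqueue_new_burst(dev_id, port_id, ev, nb_ev);
+ * \endcode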
+ * @see rte_event_port_enqueue_depth() rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
+			const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+			dev->enqueue_new_burst);
+}
+
+/**
+ * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD*
+ * on an event device designated by its *dev_id* through the event port
+ * specified by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), except that
+ * the application can use this API when all objects in the burst contain
+ * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized
+ * function can provide the additional hint to the PMD and optimize if
+ * possible.
+ *
+ * The rte_event_enqueue_forward_burst() result is undefined if the enqueue
+ * burst has event object of operation type != RTE_EVENT_OP_FORWARD.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically number of
+ *   rte_event_port_enqueue_depth() available for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event devices queue is full or if invalid parameters are specified in a
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - -EINVAL  The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - -ENOSPC  The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
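+ *
+ * For example, a worker forwarding an event to the next pipeline stage
+ * might do (illustrative sketch; next_qid is assumed to be a valid queue):
+ * \code{.c}
+ *	ev.op = RTE_EVENT_OP_FORWARD;
+ *	ev.queue_id = next_qid;
+ *	rte_event_enqueue_forward_burst(dev_id, port_id, &ev, 1);
+ * \endcode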
+ * @see rte_event_port_enqueue_depth() rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
+			const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+			dev->enqueue_forward_burst);
+}
 
 /**
@@ -1158,7 +1386,9 @@ rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
  *
  * @return
  *   - 0 on success.
- *   - <0 on failure.
+ *   - -ENOTSUP if the device doesn't support timeouts
+ *   - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL
+ *   - other values < 0 on failure.
  *
  * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
  * @see rte_event_dev_configure()
@@ -1199,9 +1429,12 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
  *
  * The number of events dequeued is the number of scheduler contexts held by
  * this port. These contexts are automatically released in the next
- * rte_event_dequeue_burst() invocation, or invoking rte_event_enqueue_burst()
- * with RTE_EVENT_OP_RELEASE operation can be used to release the
- * contexts early.
+ * rte_event_dequeue_burst() invocation if the port supports implicit
+ * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE
+ * operation can be used to release the contexts early.
+ *
+ * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
+ * enqueued to the same port that their associated events were dequeued from.
  *
  * @param dev_id
  *   The identifier of the device.
@@ -1218,7 +1451,7 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
  *   - 0 no-wait, returns immediately if there is no event.
  *   - >0 wait for the event, if the device is configured with
  *     RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
- *     the event available or *timeout_ticks* time.
+ *     at least one event is available or *timeout_ticks* time.
  *     if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
  *     then this function will wait until the event available or
  *     *dequeue_timeout_ns* ns which was previously supplied to
@@ -1237,6 +1470,18 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 {
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
 
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	if (port_id >= dev->data->nb_ports) {
+		rte_errno = -EINVAL;
+		return 0;
+	}
+#endif
+
	/*
	 * Allow zero cost non burst mode routine invocation if application
	 * requests nb_events as const one
@@ -1389,6 +1634,24 @@
 int
 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[]);
 
+/**
+ * Retrieve the service ID of the event dev. If the event dev doesn't use
+ * a rte_service function, this function returns -ESRCH.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @param [out] service_id
+ *   A pointer to a uint32_t, to be filled in with the service id.
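+ *
+ * A centralized-scheduling eventdev can then be driven by a service core
+ * (illustrative sketch; service_lcore is assumed to have been added with
+ * rte_service_lcore_add(), and the rte_service_*() calls come from
+ * rte_service.h):
+ * \code{.c}
+ *	uint32_t service_id;
+ *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
+ *		rte_service_map_lcore_set(service_id, service_lcore, 1);
+ *		rte_service_runstate_set(service_id, 1);
+ *	}
+ * \endcode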
+ *
+ * @return
+ *   - 0: Success
+ *   - <0: Error code on failure; if the event dev doesn't use a rte_service
+ *     function, this function returns -ESRCH.
+ */
+int
+rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
+
 /**
  * Dump internal information about *dev_id* to the FILE* provided in *f*.
  *
@@ -1405,6 +1668,160 @@ rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
 int
 rte_event_dev_dump(uint8_t dev_id, FILE *f);
 
+/** Maximum name length for extended statistics counters */
+#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
+
+/**
+ * Selects the component of the eventdev to retrieve statistics from.
+ */
+enum rte_event_dev_xstats_mode {
+	RTE_EVENT_DEV_XSTATS_DEVICE,
+	RTE_EVENT_DEV_XSTATS_PORT,
+	RTE_EVENT_DEV_XSTATS_QUEUE,
+};
+
+/**
+ * A name-key lookup element for extended statistics.
+ *
+ * This structure is used to map between names and ID numbers
+ * for extended eventdev statistics.
+ */
+struct rte_event_dev_xstats_name {
+	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
+};
+
+/**
+ * Retrieve names of extended statistics of an event device.
+ *
+ * @param dev_id
+ *   The identifier of the event device.
+ * @param mode
+ *   The mode of statistics to retrieve. Choices include the device statistics,
+ *   port statistics or queue statistics.
+ * @param queue_port_id
+ *   Used to specify the port or queue number in queue or port mode, and is
+ *   ignored in device mode.
+ * @param[out] xstats_names
+ *   Block of memory to insert names into. Must be at least size in capacity.
+ *   If set to NULL, function returns required capacity.
+ * @param[out] ids
+ *   Block of memory to insert ids into. Must be at least size in capacity.
+ *   If set to NULL, function returns required capacity. The id values returned
+ *   can be passed to *rte_event_dev_xstats_get* to select statistics.
+ * @param size
+ *   Capacity of xstats_names (number of names).
+ * @return
+ *   - positive value lower or equal to size: success. The return value
+ *     is the number of entries filled in the stats table.
+ *   - positive value higher than size: error, the given statistics table
+ *     is too small. The return value corresponds to the size that should
+ *     be given to succeed. The entries in the table are not valid and
+ *     shall not be used by the caller.
+ *   - negative value on error:
+ *        -ENODEV for invalid *dev_id*
+ *        -EINVAL for invalid mode, queue port or id parameters
+ *        -ENOTSUP if the device doesn't support this function.
+ */
+int
+rte_event_dev_xstats_names_get(uint8_t dev_id,
+			       enum rte_event_dev_xstats_mode mode,
+			       uint8_t queue_port_id,
+			       struct rte_event_dev_xstats_name *xstats_names,
+			       unsigned int *ids,
+			       unsigned int size);
+
+/**
+ * Retrieve extended statistics of an event device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param mode
+ *   The mode of statistics to retrieve. Choices include the device statistics,
+ *   port statistics or queue statistics.
+ * @param queue_port_id
+ *   Used to specify the port or queue number in queue or port mode, and is
+ *   ignored in device mode.
+ * @param ids
+ *   The id numbers of the stats to get. The ids can be got from the stat
+ *   position in the stat list from rte_event_dev_xstats_names_get(), or
+ *   by using rte_event_dev_xstats_by_name_get().
+ * @param[out] values
+ *   The values for each stats request by ID.
+ * @param n
+ *   The number of stats requested
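+ *
+ * For example, all device-level counters can be snapshot as below
+ * (illustrative sketch; the fixed-size arrays are assumed large enough):
+ * \code{.c}
+ *	struct rte_event_dev_xstats_name names[128];
+ *	unsigned int ids[128];
+ *	uint64_t values[128];
+ *	int n = rte_event_dev_xstats_names_get(dev_id,
+ *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, names, ids, 128);
+ *	if (n > 0 && n <= 128)
+ *		rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
+ *				0, ids, values, n);
+ * \endcode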
+ * @return
+ *   - positive value: number of stat entries filled into the values array
+ *   - negative value on error:
+ *        -ENODEV for invalid *dev_id*
+ *        -EINVAL for invalid mode, queue port or id parameters
+ *        -ENOTSUP if the device doesn't support this function.
+ */
+int
+rte_event_dev_xstats_get(uint8_t dev_id,
+			 enum rte_event_dev_xstats_mode mode,
+			 uint8_t queue_port_id,
+			 const unsigned int ids[],
+			 uint64_t values[], unsigned int n);
+
+/**
+ * Retrieve the value of a single stat by requesting it by name.
+ *
+ * @param dev_id
+ *   The identifier of the device
+ * @param name
+ *   The stat name to retrieve
+ * @param[out] id
+ *   If non-NULL, the numerical id of the stat will be returned, so that further
+ *   requests for the stat can be got using rte_event_dev_xstats_get(), which
+ *   will be faster as it doesn't need to scan a list of names for the stat.
+ *   If the stat cannot be found, the id returned will be (unsigned)-1.
+ * @return
+ *   - positive value or zero: the stat value
+ *   - negative value: -EINVAL if stat not found, -ENOTSUP if not supported.
+ */
+uint64_t
+rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
+				 unsigned int *id);
+
+/**
+ * Reset the values of the xstats of the selected component in the device.
+ *
+ * @param dev_id
+ *   The identifier of the device
+ * @param mode
+ *   The mode of the statistics to reset. Choose from device, queue or port.
+ * @param queue_port_id
+ *   The queue or port to reset. 0 and positive values select ports and queues,
+ *   while -1 indicates all ports or queues.
+ * @param ids
+ *   Selects specific statistics to be reset. When NULL, all statistics
+ *   selected by *mode* will be reset. If non-NULL, must point to array of at
+ *   least *nb_ids* size.
+ * @param nb_ids
+ *   The number of ids available from the *ids* array. Ignored when ids is
+ *   NULL.
+ * @return
+ *   - zero: successfully reset the statistics to zero
+ *   - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported.
+ */
+int
+rte_event_dev_xstats_reset(uint8_t dev_id,
+			   enum rte_event_dev_xstats_mode mode,
+			   int16_t queue_port_id,
+			   const uint32_t ids[],
+			   uint32_t nb_ids);
+
+/**
+ * Trigger the eventdev self test.
+ *
+ * @param dev_id
+ *   The identifier of the device
+ * @return
+ *   - 0: Selftest successful
+ *   - -ENOTSUP if the device doesn't support selftest
+ *   - other values < 0 on failure.
+ */
+int rte_event_dev_selftest(uint8_t dev_id);
+
 #ifdef __cplusplus
 }
 #endif