X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eventdev%2Frte_eventdev.h;h=5ce2f33fecd237903f48df7c87bead02ccc8529c;hb=3ed7fc039ae86a6c3f6d15654719a293316a9db5;hp=014e1ecbddebc2061d6cbb956afe6101ecbb3578;hpb=5223a1f3b8dedada24385f8365f5ef21818e8f0e;p=dpdk.git

diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
index 014e1ecbdd..5ce2f33fec 100644
--- a/lib/librte_eventdev/rte_eventdev.h
+++ b/lib/librte_eventdev/rte_eventdev.h
@@ -244,8 +244,9 @@ extern "C" {
 #endif

 #include
-#include
-#include
+#include
+
+struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */

 /* Event device capability bitmap flags */
 #define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)
@@ -270,6 +271,13 @@ extern "C" {
 *
 * @see rte_event_schedule(), rte_event_dequeue_burst()
 */
+#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
+/**< Event device is capable of enqueuing events of any type to any queue.
+ * If this capability is not set, the queue only supports events of the
+ * *RTE_EVENT_QUEUE_CFG_* type that it was created with.
+ *
+ * @see RTE_EVENT_QUEUE_CFG_* values
+ */

 /* Event device priority levels */
 #define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
@@ -329,7 +337,7 @@ rte_event_dev_socket_id(uint8_t dev_id);
 */
 struct rte_event_dev_info {
     const char *driver_name;    /**< Event driver name */
-    struct rte_pci_device *pci_dev;    /**< PCI information */
+    struct rte_device *dev;    /**< Device information */
     uint32_t min_dequeue_timeout_ns;
     /**< Minimum supported global dequeue timeout(ns) by this device */
     uint32_t max_dequeue_timeout_ns;
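As context for the two hunks above, here is a small illustrative sketch (not part of the patch) of how an application might consume the reworked rte_event_dev_info and the new RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES flag. The device id passed in and the fields printed are assumptions made only for this example.

#include <stdio.h>
#include <rte_eventdev.h>

static int
check_all_types_cap(uint8_t dev_id)
{
        struct rte_event_dev_info info;

        /* Fills in driver name, capability flags and the min/max limits
         * (max_num_events, max_event_port_dequeue_depth, ...) that later
         * bound the values placed in struct rte_event_dev_config.
         */
        if (rte_event_dev_info_get(dev_id, &info) < 0)
                return -1;

        printf("%s: dequeue timeout range %u..%u ns\n", info.driver_name,
               info.min_dequeue_timeout_ns, info.max_dequeue_timeout_ns);

        /* With this capability any queue accepts events of any schedule
         * type; without it a queue only takes the RTE_EVENT_QUEUE_CFG_*
         * type it was created with.
         */
        return !!(info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES);
}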
@@ -403,11 +411,12 @@ struct rte_event_dev_config {
     * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
     */
    int32_t nb_events_limit;
-   /**< Applies to *closed system* event dev only. This field indicates a
-    * limit to ethdev-like devices to limit the number of events injected
-    * into the system to not overwhelm core-to-core events.
-    * This value cannot exceed the *max_num_events* which previously
-    * provided in rte_event_dev_info_get()
+   /**< In a *closed system* this field is the limit on maximum number of
+    * events that can be inflight in the eventdev at a given time. The
+    * limit is required to ensure that the finite space in a closed system
+    * is not overwhelmed. The value cannot exceed the *max_num_events*
+    * as provided by rte_event_dev_info_get().
+    * This value should be set to -1 for *open system*.
     */
    uint8_t nb_event_queues;
    /**< Number of event queues to configure on this device.
@@ -424,7 +433,7 @@ struct rte_event_dev_config {
     * This value cannot exceed the *max_event_queue_flows* which previously
     * provided in rte_event_dev_info_get()
     */
-   uint8_t nb_event_port_dequeue_depth;
+   uint32_t nb_event_port_dequeue_depth;
    /**< Maximum number of events can be dequeued at a time from an
     * event port by this device.
     * This value cannot exceed the *max_event_port_dequeue_depth*
@@ -469,12 +478,6 @@ rte_event_dev_configure(uint8_t dev_id,

 /* Event queue specific APIs */

 /* Event queue configuration bitmap flags */
-#define RTE_EVENT_QUEUE_CFG_DEFAULT (0)
-/**< Default value of *event_queue_cfg* when rte_event_queue_setup() invoked
- * with queue_conf == NULL
- *
- * @see rte_event_queue_setup()
- */
 #define RTE_EVENT_QUEUE_CFG_TYPE_MASK (3ULL << 0)
 /**< Mask for event queue schedule type configuration request */
 #define RTE_EVENT_QUEUE_CFG_ALL_TYPES (0ULL << 0)
@@ -632,14 +635,15 @@ struct rte_event_port_conf {
     * can have a lower threshold so as not to overwhelm the device,
     * while ports used for worker pools can have a higher threshold.
     * This value cannot exceed the *nb_events_limit*
-    * which previously supplied to rte_event_dev_configure()
+    * which was previously supplied to rte_event_dev_configure().
+    * This should be set to '-1' for *open system*.
     */
-   uint8_t dequeue_depth;
+   uint16_t dequeue_depth;
    /**< Configure number of bulk dequeues for this event port.
     * This value cannot exceed the *nb_event_port_dequeue_depth*
     * which previously supplied to rte_event_dev_configure()
     */
-   uint8_t enqueue_depth;
+   uint16_t enqueue_depth;
    /**< Configure number of bulk enqueues for this event port.
     * This value cannot exceed the *nb_event_port_enqueue_depth*
     * which previously supplied to rte_event_dev_configure()
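The struct changes above are easiest to see in use. The following is a hedged sketch (not from the patch) of configuring a closed-system device and its ports; the queue/port counts are placeholders, and every value must respect the limits reported by rte_event_dev_info_get().

#include <string.h>
#include <rte_eventdev.h>

static int
setup_eventdev(uint8_t dev_id, const struct rte_event_dev_info *info)
{
        struct rte_event_dev_config config;
        struct rte_event_port_conf port_conf;
        uint8_t port;
        int ret;

        memset(&config, 0, sizeof(config));
        config.nb_event_queues = 2;          /* example values only */
        config.nb_event_ports = 4;
        config.nb_event_queue_flows = 1024;
        /* closed system: cap the number of in-flight events */
        config.nb_events_limit = info->max_num_events;
        config.nb_event_port_dequeue_depth = info->max_event_port_dequeue_depth;
        config.nb_event_port_enqueue_depth = info->max_event_port_enqueue_depth;
        config.dequeue_timeout_ns = info->min_dequeue_timeout_ns;

        ret = rte_event_dev_configure(dev_id, &config);
        if (ret < 0)
                return ret;

        memset(&port_conf, 0, sizeof(port_conf));
        /* producer ports would typically use a lower threshold */
        port_conf.new_event_threshold = config.nb_events_limit / 2;
        port_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
        port_conf.enqueue_depth = config.nb_event_port_enqueue_depth;

        for (port = 0; port < config.nb_event_ports; port++) {
                ret = rte_event_port_setup(dev_id, port, &port_conf);
                if (ret < 0)
                        return ret;
        }
        return 0;
}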
@@ -972,6 +976,8 @@ struct rte_event {
     };
 };

+
+struct rte_eventdev_driver;
 struct rte_eventdev_ops;
 struct rte_eventdev;

@@ -993,6 +999,49 @@ typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
        uint16_t nb_events, uint64_t timeout_ticks);
 /**< @internal Dequeue burst of events from port of a device */

+#define RTE_EVENTDEV_NAME_MAX_LEN (64)
+/**< @internal Max length of name of event PMD */
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_eventdev_data {
+   int socket_id;
+   /**< Socket ID where memory is allocated */
+   uint8_t dev_id;
+   /**< Device ID for this instance */
+   uint8_t nb_queues;
+   /**< Number of event queues. */
+   uint8_t nb_ports;
+   /**< Number of event ports. */
+   void **ports;
+   /**< Array of pointers to ports. */
+   uint8_t *ports_dequeue_depth;
+   /**< Array of port dequeue depth. */
+   uint8_t *ports_enqueue_depth;
+   /**< Array of port enqueue depth. */
+   uint8_t *queues_prio;
+   /**< Array of queue priority. */
+   uint16_t *links_map;
+   /**< Memory to store queues to port connections. */
+   void *dev_private;
+   /**< PMD-specific private data */
+   uint32_t event_dev_cap;
+   /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+   struct rte_event_dev_config dev_conf;
+   /**< Configuration applied to device. */
+
+   RTE_STD_C11
+   uint8_t dev_started : 1;
+   /**< Device state: STARTED(1)/STOPPED(0) */
+
+   char name[RTE_EVENTDEV_NAME_MAX_LEN];
+   /**< Unique identifier name */
+} __rte_cache_aligned;
+
 /** @internal The data structure associated with each event device. */
 struct rte_eventdev {
@@ -1007,8 +1056,23 @@ struct rte_eventdev {
    event_dequeue_burst_t dequeue_burst;
    /**< Pointer to PMD dequeue burst function. */

+   struct rte_eventdev_data *data;
+   /**< Pointer to device data */
+   const struct rte_eventdev_ops *dev_ops;
+   /**< Functions exported by PMD */
+   struct rte_device *dev;
+   /**< Device info. supplied by probing */
+   const struct rte_eventdev_driver *driver;
+   /**< Driver for this device */
+
+   RTE_STD_C11
+   uint8_t attached : 1;
+   /**< Flag indicating the device is attached */
 } __rte_cache_aligned;

+extern struct rte_eventdev *rte_eventdevs;
+/** @internal The pool of rte_eventdev structures. */
+
 /**
 * Schedule one or more events in the event dev.
@@ -1019,8 +1083,13 @@ struct rte_eventdev {
 * @param dev_id
 *   The identifier of the device.
 */
-void
-rte_event_schedule(uint8_t dev_id);
+static inline void
+rte_event_schedule(uint8_t dev_id)
+{
+   struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+   if (*dev->schedule)
+       (*dev->schedule)(dev);
+}

 /**
 * Enqueue a burst of events objects or an event object supplied in *rte_event*
@@ -1055,9 +1124,23 @@ rte_event_schedule(uint8_t dev_id);
 *
 * @see rte_event_port_enqueue_depth()
 */
-uint16_t
+static inline uint16_t
 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
-       const struct rte_event ev[], uint16_t nb_events);
+       const struct rte_event ev[], uint16_t nb_events)
+{
+   struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+   /*
+    * Allow zero cost non burst mode routine invocation if application
+    * requests nb_events as const one
+    */
+   if (nb_events == 1)
+       return (*dev->enqueue)(
+           dev->data->ports[port_id], ev);
+   else
+       return (*dev->enqueue_burst)(
+           dev->data->ports[port_id], ev, nb_events);
+}

 /**
 * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
 *
@@ -1076,7 +1159,9 @@ rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 *
 * @return
 *  - 0 on success.
- *  - <0 on failure.
+ *  - -ENOTSUP if the device doesn't support timeouts
+ *  - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL
+ *  - other values < 0 on failure.
 *
 * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
 * @see rte_event_dev_configure()
@@ -1136,7 +1221,7 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
 *  - 0 no-wait, returns immediately if there is no event.
 *  - >0 wait for the event, if the device is configured with
 *    RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
- *    the event available or *timeout_ticks* time.
+ *    at least one event is available or *timeout_ticks* time.
 *    if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
 *    then this function will wait until the event available or
 *    *dequeue_timeout_ns* ns which was previously supplied to
@@ -1149,9 +1234,24 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
 *
 * @see rte_event_port_dequeue_depth()
 */
-uint16_t
+static inline uint16_t
 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
-       uint16_t nb_events, uint64_t timeout_ticks);
+       uint16_t nb_events, uint64_t timeout_ticks)
+{
+   struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+   /*
+    * Allow zero cost non burst mode routine invocation if application
+    * requests nb_events as const one
+    */
+   if (nb_events == 1)
+       return (*dev->dequeue)(
+           dev->data->ports[port_id], ev, timeout_ticks);
+   else
+       return (*dev->dequeue_burst)(
+           dev->data->ports[port_id], ev, nb_events,
+           timeout_ticks);
+}
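To show how the new inline fast-path wrappers are meant to be used together, here is a minimal worker-loop sketch (illustrative only, not part of the patch). app_quit and app_process() are hypothetical application hooks, and device id 0 is assumed; one lcore calls rte_event_schedule() for software PMDs that need an explicit scheduling core.

#include <stdbool.h>
#include <rte_eventdev.h>

#define BURST_SIZE 16   /* illustrative; keep within the port's dequeue_depth */

extern volatile bool app_quit;                  /* hypothetical shutdown flag */
extern void app_process(struct rte_event *ev);  /* hypothetical per-event work */

/* Worker body, one instance per event port. */
static int
worker_main(void *arg)
{
        const uint8_t port_id = *(const uint8_t *)arg;
        struct rte_event ev[BURST_SIZE];

        while (!app_quit) {
                uint16_t i, nb;

                /* nb_events == 1 would take the single-event fast path above */
                nb = rte_event_dequeue_burst(0, port_id, ev, BURST_SIZE, 0);
                for (i = 0; i < nb; i++) {
                        app_process(&ev[i]);
                        ev[i].op = RTE_EVENT_OP_FORWARD; /* send to next stage */
                }
                if (nb != 0)
                        rte_event_enqueue_burst(0, port_id, ev, nb);
        }
        return 0;
}

/* Software PMDs without a distributed scheduler also need one lcore doing: */
static int
scheduler_main(void *arg __rte_unused)
{
        while (!app_quit)
                rte_event_schedule(0);
        return 0;
}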

 /**
 * Link multiple source event queues supplied in *queues* to the destination
@@ -1196,7 +1296,8 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
 *    with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
 *
 * @param nb_links
- *   The number of links to establish
+ *   The number of links to establish. This parameter is ignored if queues is
+ *   NULL.
 *
 * @return
 *   The number of links actually established. The return value can be less than
 *
@@ -1241,7 +1342,8 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id,
 *   event queue(s) from the event port *port_id*.
 *
 * @param nb_unlinks
- *   The number of unlinks to establish
+ *   The number of unlinks to establish. This parameter is ignored if queues is
+ *   NULL.
 *
 * @return
 *   The number of unlinks actually established. The return value can be less
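A short usage sketch of the link/unlink semantics documented above (illustrative, not from the patch): passing NULL for the queue array links or unlinks every queue set up on the device, in which case nb_links/nb_unlinks is ignored. The queue ids below are placeholders for whatever was configured earlier.

#include <rte_eventdev.h>

static int
link_worker_port(uint8_t dev_id, uint8_t port_id)
{
        /* Explicit mapping: two queues, one at higher servicing priority. */
        const uint8_t queues[] = { 0, 1 };
        const uint8_t priorities[] = {
                RTE_EVENT_DEV_PRIORITY_HIGHEST,
                RTE_EVENT_DEV_PRIORITY_NORMAL,
        };
        int nb;

        nb = rte_event_port_link(dev_id, port_id, queues, priorities, 2);
        if (nb != 2)
                return -1;      /* fewer links established than requested */

        /* Alternative: link every configured queue at normal priority;
         * the count argument is ignored because queues is NULL:
         * rte_event_port_link(dev_id, port_id, NULL, NULL, 0);
         */

        /* Tear everything down again before reconfiguring the port. */
        return rte_event_port_unlink(dev_id, port_id, NULL, 0);
}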
@@ -1306,6 +1408,148 @@ rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
 int
 rte_event_dev_dump(uint8_t dev_id, FILE *f);

+/** Maximum name length for extended statistics counters */
+#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
+
+/**
+ * Selects the component of the eventdev to retrieve statistics from.
+ */
+enum rte_event_dev_xstats_mode {
+   RTE_EVENT_DEV_XSTATS_DEVICE,
+   RTE_EVENT_DEV_XSTATS_PORT,
+   RTE_EVENT_DEV_XSTATS_QUEUE,
+};
+
+/**
+ * A name-key lookup element for extended statistics.
+ *
+ * This structure is used to map between names and ID numbers
+ * for extended eventdev statistics.
+ */
+struct rte_event_dev_xstats_name {
+   char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
+};
+
+/**
+ * Retrieve names of extended statistics of an event device.
+ *
+ * @param dev_id
+ *   The identifier of the event device.
+ * @param mode
+ *   The mode of statistics to retrieve. Choices include the device statistics,
+ *   port statistics or queue statistics.
+ * @param queue_port_id
+ *   Used to specify the port or queue number in queue or port mode, and is
+ *   ignored in device mode.
+ * @param[out] xstats_names
+ *   Block of memory to insert names into. Must be at least size in capacity.
+ *   If set to NULL, function returns required capacity.
+ * @param[out] ids
+ *   Block of memory to insert ids into. Must be at least size in capacity.
+ *   If set to NULL, function returns required capacity. The id values returned
+ *   can be passed to *rte_event_dev_xstats_get* to select statistics.
+ * @param size
+ *   Capacity of xstats_names (number of names).
+ * @return
+ *   - positive value lower or equal to size: success. The return value
+ *     is the number of entries filled in the stats table.
+ *   - positive value higher than size: error, the given statistics table
+ *     is too small. The return value corresponds to the size that should
+ *     be given to succeed. The entries in the table are not valid and
+ *     shall not be used by the caller.
+ *   - negative value on error:
+ *        -ENODEV for invalid *dev_id*
+ *        -EINVAL for invalid mode, queue port or id parameters
+ *        -ENOTSUP if the device doesn't support this function.
+ */
+int
+rte_event_dev_xstats_names_get(uint8_t dev_id,
+       enum rte_event_dev_xstats_mode mode,
+       uint8_t queue_port_id,
+       struct rte_event_dev_xstats_name *xstats_names,
+       unsigned int *ids,
+       unsigned int size);
+
+/**
+ * Retrieve extended statistics of an event device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param mode
+ *   The mode of statistics to retrieve. Choices include the device statistics,
+ *   port statistics or queue statistics.
+ * @param queue_port_id
+ *   Used to specify the port or queue number in queue or port mode, and is
+ *   ignored in device mode.
+ * @param ids
+ *   The id numbers of the stats to get. The ids can be got from the stat
+ *   position in the stat list from rte_event_dev_get_xstats_names(), or
+ *   by using rte_eventdev_get_xstats_by_name()
+ * @param[out] values
+ *   The values for each stats request by ID.
+ * @param n
+ *   The number of stats requested
+ * @return
+ *   - positive value: number of stat entries filled into the values array
+ *   - negative value on error:
+ *        -ENODEV for invalid *dev_id*
+ *        -EINVAL for invalid mode, queue port or id parameters
+ *        -ENOTSUP if the device doesn't support this function.
+ */
+int
+rte_event_dev_xstats_get(uint8_t dev_id,
+       enum rte_event_dev_xstats_mode mode,
+       uint8_t queue_port_id,
+       const unsigned int ids[],
+       uint64_t values[], unsigned int n);
+
+/**
+ * Retrieve the value of a single stat by requesting it by name.
+ *
+ * @param dev_id
+ *   The identifier of the device
+ * @param name
+ *   The stat name to retrieve
+ * @param[out] id
+ *   If non-NULL, the numerical id of the stat will be returned, so that further
+ *   requests for the stat can be got using rte_eventdev_xstats_get, which will
+ *   be faster as it doesn't need to scan a list of names for the stat.
+ *   If the stat cannot be found, the id returned will be (unsigned)-1.
+ * @return
+ *   - positive value or zero: the stat value
+ *   - negative value: -EINVAL if stat not found, -ENOTSUP if not supported.
+ */
+uint64_t
+rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
+       unsigned int *id);
+
+/**
+ * Reset the values of the xstats of the selected component in the device.
+ *
+ * @param dev_id
+ *   The identifier of the device
+ * @param mode
+ *   The mode of the statistics to reset. Choose from device, queue or port.
+ * @param queue_port_id
+ *   The queue or port to reset. 0 and positive values select ports and queues,
+ *   while -1 indicates all ports or queues.
+ * @param ids
+ *   Selects specific statistics to be reset. When NULL, all statistics selected
+ *   by *mode* will be reset. If non-NULL, must point to array of at least
+ *   *nb_ids* size.
+ * @param nb_ids
+ *   The number of ids available from the *ids* array. Ignored when ids is NULL.
+ * @return
+ *   - zero: successfully reset the statistics to zero
+ *   - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported.
+ */
+int
+rte_event_dev_xstats_reset(uint8_t dev_id,
+       enum rte_event_dev_xstats_mode mode,
+       int16_t queue_port_id,
+       const uint32_t ids[],
+       uint32_t nb_ids);
+
 #ifdef __cplusplus
 }
 #endif
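Finally, a hedged sketch of how the new xstats calls fit together (not part of the patch): a first call with NULL arrays reports the required capacity, then names, ids and values are fetched for the device-level component, so queue_port_id is ignored here.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_eventdev.h>

static int
dump_dev_xstats(uint8_t dev_id)
{
        struct rte_event_dev_xstats_name *names;
        unsigned int *ids;
        uint64_t *values;
        int n, i, ret = -1;

        /* With NULL output arrays the call reports the required capacity */
        n = rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
                                           0, NULL, NULL, 0);
        if (n <= 0)
                return n;

        names = malloc(sizeof(*names) * n);
        ids = malloc(sizeof(*ids) * n);
        values = malloc(sizeof(*values) * n);
        if (names == NULL || ids == NULL || values == NULL)
                goto out;

        if (rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
                                           0, names, ids, n) != n)
                goto out;
        if (rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
                                     0, ids, values, n) != n)
                goto out;

        for (i = 0; i < n; i++)
                printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
        ret = 0;
out:
        free(names);
        free(ids);
        free(values);
        return ret;
}

The same ids could afterwards be passed to rte_event_dev_xstats_reset(), or a single counter looked up with rte_event_dev_xstats_by_name_get() to avoid rescanning the name list.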