*/
struct rte_event_dev_info {
const char *driver_name; /**< Event driver name */
- struct rte_pci_device *pci_dev; /**< PCI information */
+ struct rte_device *dev; /**< Device information */
uint32_t min_dequeue_timeout_ns;
/**< Minimum supported global dequeue timeout(ns) by this device */
uint32_t max_dequeue_timeout_ns;
* @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
*/
int32_t nb_events_limit;
- /**< Applies to *closed system* event dev only. This field indicates a
- * limit to ethdev-like devices to limit the number of events injected
- * into the system to not overwhelm core-to-core events.
- * This value cannot exceed the *max_num_events* which previously
- * provided in rte_event_dev_info_get()
+ /**< In a *closed system* this field is the limit on maximum number of
+ * events that can be inflight in the eventdev at a given time. The
+ * limit is required to ensure that the finite space in a closed system
+ * is not overwhelmed. The value cannot exceed the *max_num_events*
+ * as provided by rte_event_dev_info_get().
+ * This value should be set to -1 for *open system*.
*/
uint8_t nb_event_queues;
/**< Number of event queues to configure on this device.
* This value cannot exceed the *max_event_queues* which was previously
* provided in rte_event_dev_info_get().
*/
- uint8_t nb_event_port_dequeue_depth;
+ uint32_t nb_event_port_dequeue_depth;
/**< Maximum number of events that can be dequeued at a time from an
* event port by this device.
* This value cannot exceed the *max_event_port_dequeue_depth*
* can have a lower threshold so as not to overwhelm the device,
* while ports used for worker pools can have a higher threshold.
* This value cannot exceed the *nb_events_limit*
- * which previously supplied to rte_event_dev_configure()
+ * which was previously supplied to rte_event_dev_configure().
+ * This should be set to '-1' for *open system*.
*/
- uint8_t dequeue_depth;
+ uint16_t dequeue_depth;
/**< Configure number of bulk dequeues for this event port.
* This value cannot exceed the *nb_event_port_dequeue_depth*
* which was previously supplied to rte_event_dev_configure().
*/
- uint8_t enqueue_depth;
+ uint16_t enqueue_depth;
/**< Configure number of bulk enqueues for this event port.
* This value cannot exceed the *nb_event_port_enqueue_depth*
* which was previously supplied to rte_event_dev_configure().
/**< Pointer to device data */
const struct rte_eventdev_ops *dev_ops;
/**< Functions exported by PMD */
- struct rte_pci_device *pci_dev;
- /**< PCI info. supplied by probing */
+ struct rte_device *dev;
+ /**< Device info. supplied by probing */
const struct rte_eventdev_driver *driver;
/**< Driver for this device */
*
* @return
* - 0 on success.
- * - <0 on failure.
+ * - -ENOTSUP if the device doesn't support timeouts
+ * - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL
+ * - other values < 0 on failure.
*
* @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
* @see rte_event_dev_configure()
* - 0 no-wait, returns immediately if there is no event.
* - >0 wait for the event, if the device is configured with
* RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
- * the event available or *timeout_ticks* time.
+ * at least one event is available or *timeout_ticks* time.
* if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
* then this function will wait until at least one event is available or
* the *dequeue_timeout_ns* ns which was previously supplied to
* with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
*
* @param nb_links
- * The number of links to establish
+ * The number of links to establish. This parameter is ignored if queues is
+ * NULL.
*
* @return
* The number of links actually established. The return value can be less than
* event queue(s) from the event port *port_id*.
*
* @param nb_unlinks
- * The number of unlinks to establish
+ * The number of unlinks to establish. This parameter is ignored if queues is
+ * NULL.
*
* @return
* The number of unlinks actually established. The return value can be less