From: Jerin Jacob
Date: Thu, 29 Jun 2017 14:19:53 +0000 (+0530)
Subject: eventdev: introduce specialized enqueue new op variant
X-Git-Tag: spdx-start~2621
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=65293784977f47ccdf5d8813a3e599068d8dae52;p=dpdk.git

eventdev: introduce specialized enqueue new op variant

Introduce rte_event_enqueue_new_burst() to give the PMD an opportunity
to optimize when all events in the enqueue burst have the op type
RTE_EVENT_OP_NEW.

If a PMD does not have any optimization opportunity for this operation,
it can use the generic enqueue burst PMD callback as the fallback.

Signed-off-by: Jerin Jacob
Acked-by: Gage Eads
Acked-by: Harry van Haaren
---

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 80ee65c839..633e2a0fad 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -625,6 +625,7 @@ dpaa2_eventdev_create(const char *name)
 	eventdev->schedule = NULL;
 	eventdev->enqueue = dpaa2_eventdev_enqueue;
 	eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
+	eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
 	eventdev->dequeue = dpaa2_eventdev_dequeue;
 	eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;
 
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 8dc7b2ef8b..0d0c6a186d 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -158,6 +158,7 @@ ssovf_fastpath_fns_set(struct rte_eventdev *dev)
 	dev->schedule = NULL;
 	dev->enqueue = ssows_enq;
 	dev->enqueue_burst = ssows_enq_burst;
+	dev->enqueue_new_burst = ssows_enq_burst;
 	dev->dequeue = ssows_deq;
 	dev->dequeue_burst = ssows_deq_burst;
 
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index fe2a61e2f4..951ad1b337 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -796,6 +796,7 @@ sw_probe(struct rte_vdev_device *vdev)
 	dev->dev_ops = &evdev_sw_ops;
 	dev->enqueue = sw_event_enqueue;
 	dev->enqueue_burst = sw_event_enqueue_burst;
+	dev->enqueue_new_burst = sw_event_enqueue_burst;
 	dev->dequeue = sw_event_dequeue;
 	dev->dequeue_burst = sw_event_dequeue_burst;
 	dev->schedule = sw_event_schedule;
 
diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
index c4d623a627..132f75fda2 100644
--- a/lib/librte_eventdev/rte_eventdev.h
+++ b/lib/librte_eventdev/rte_eventdev.h
@@ -1065,6 +1065,8 @@ struct rte_eventdev {
 	/**< Pointer to PMD enqueue function. */
 	event_enqueue_burst_t enqueue_burst;
 	/**< Pointer to PMD enqueue burst function. */
+	event_enqueue_burst_t enqueue_new_burst;
+	/**< Pointer to PMD enqueue burst function (op new variant). */
 	event_dequeue_t dequeue;
 	/**< Pointer to PMD dequeue function. */
 	event_dequeue_burst_t dequeue_burst;
@@ -1180,6 +1182,55 @@ rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
 			dev->enqueue_burst);
 }
 
+/**
+ * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_NEW* on
+ * an event device designated by its *dev_id* through the event port specified
+ * by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), except that
+ * the application can use this API when all objects in the burst contain
+ * the enqueue operation of type *RTE_EVENT_OP_NEW*. This specialized function
+ * gives the PMD an additional hint so that it can optimize if possible.
+ *
+ * The result of rte_event_enqueue_new_burst() is undefined if the enqueue
+ * burst contains an event object of operation type != RTE_EVENT_OP_NEW.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically the value returned by
+ *   rte_event_port_enqueue_depth() for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event device's queue is full or if invalid parameters are specified in
+ *   a *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - -EINVAL  The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - -ENOSPC  The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
+ * @see rte_event_port_enqueue_depth() rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
+			const struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+			dev->enqueue_new_burst);
+}
+
 /**
  * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
  *
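
For illustration only, here is a minimal application-side usage sketch of the
new API, not part of this patch. The helper name produce_new_events(), the
EXAMPLE_BURST size, the RTE_SCHED_TYPE_ATOMIC choice, and the retry policy are
assumptions; dev_id, port_id and queue_id are presumed to have been configured
elsewhere with the usual eventdev setup calls.

    /* Hypothetical example: enqueue freshly produced objects, all tagged
     * RTE_EVENT_OP_NEW, through one event port. */
    #include <string.h>
    #include <rte_eventdev.h>
    #include <rte_errno.h>

    #define EXAMPLE_BURST 32	/* arbitrary burst size for this sketch */

    static void
    produce_new_events(uint8_t dev_id, uint8_t port_id, uint8_t queue_id,
    		   void *objs[], uint16_t nb_objs)
    {
    	struct rte_event ev[EXAMPLE_BURST];
    	uint16_t i, sent = 0;

    	if (nb_objs > EXAMPLE_BURST)
    		nb_objs = EXAMPLE_BURST;

    	for (i = 0; i < nb_objs; i++) {
    		memset(&ev[i], 0, sizeof(ev[i]));
    		ev[i].op = RTE_EVENT_OP_NEW;	/* every event is OP_NEW */
    		ev[i].queue_id = queue_id;
    		ev[i].sched_type = RTE_SCHED_TYPE_ATOMIC; /* assumed sched type */
    		ev[i].event_ptr = objs[i];
    	}

    	/* The specialized variant tells the PMD that no event in the burst
    	 * is a FORWARD or RELEASE, so it may skip per-event op handling. */
    	while (sent < nb_objs) {
    		uint16_t n = rte_event_enqueue_new_burst(dev_id, port_id,
    						&ev[sent], nb_objs - sent);
    		sent += n;
    		if (n == 0 && rte_errno != -ENOSPC)
    			break;	/* stop on errors other than backpressure */
    	}
    }

Whether spinning on backpressure is appropriate is an application decision;
the point of the sketch is only that every event carries RTE_EVENT_OP_NEW,
which is what makes the specialized enqueue variant valid to use.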