eventdev: add callback for Rx adapter SW transfers
author Nikhil Rao <nikhil.rao@intel.com>
Thu, 28 Jun 2018 06:48:10 +0000 (12:18 +0530)
committer Jerin Jacob <jerin.jacob@caviumnetworks.com>
Fri, 6 Jul 2018 04:54:49 +0000 (06:54 +0200)
Add the ability for an application to register a callback function
for SW transfers; the callback decides which packets are
enqueued to the event device.
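
A rough sketch of the contract, assuming an application-defined
callback named cb:

    n = cb(port, queue, buf_size, buf_count, mbufs, nb_mbuf, arg, enq_buf);
    /* n == nb_mbuf: the adapter enqueues mbufs[] unchanged */
    /* n <  nb_mbuf: the adapter enqueues enq_buf[0..n-1] as filled by cb */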

Signed-off-by: Nikhil Rao <nikhil.rao@intel.com>
Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
doc/guides/prog_guide/event_ethernet_rx_adapter.rst
lib/librte_eventdev/rte_event_eth_rx_adapter.c
lib/librte_eventdev/rte_event_eth_rx_adapter.h
lib/librte_eventdev/rte_eventdev_version.map

index 810dfc9..0166bb4 100644 (file)
@@ -168,3 +168,20 @@ received on a polled Rx queue. The interrupt thread is affinitized to the same
 CPUs as the lcores of the Rx adapter service function; if the Rx adapter
 service function has not been mapped to any lcores, the interrupt thread
 is mapped to the master lcore.
+
+Rx Callback for SW Rx Adapter
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For SW-based packet transfers, i.e., when the
+``RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT`` capability is not set in the
+adapter's capabilities flags for a particular Ethernet device, the service
+function temporarily enqueues mbufs to an event buffer before batch enqueueing
+these to the event device. If the buffer fills up, the service function stops
+dequeueing packets from the Ethernet device. The application may want to
+monitor the buffer fill level and instruct the service function to selectively
+enqueue packets to the event device. The application may also use some other
+criteria to decide which packets should enter the event device even when
+the event buffer fill level is low. The
+``rte_event_eth_rx_adapter_cb_register()`` function allows the application
+to register a callback that selects which packets to enqueue to the event
+device.
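+
+The following sketch shows one possible callback and its registration. The
+function name ``app_rx_cb`` and its fill-level policy are illustrative only;
+any function matching ``rte_event_eth_rx_adapter_cb_fn`` can be used.
+
+.. code-block:: c
+
+        static uint16_t
+        app_rx_cb(uint16_t eth_dev_id, uint16_t queue_id,
+                  uint32_t enqueue_buf_size, uint32_t enqueue_buf_count,
+                  struct rte_mbuf **mbuf, uint16_t nb_mbuf,
+                  void *cb_arg, struct rte_mbuf **enq_buf)
+        {
+                uint16_t i, n = 0;
+
+                RTE_SET_USED(eth_dev_id);
+                RTE_SET_USED(queue_id);
+                RTE_SET_USED(cb_arg);
+
+                /* Buffer has room: let the adapter enqueue everything */
+                if (enqueue_buf_count < enqueue_buf_size / 2)
+                        return nb_mbuf;
+
+                /*
+                 * Buffer at least half full: keep every other packet (a
+                 * stand-in for an application-specific test) and free the
+                 * rest. Returning n < nb_mbuf makes the adapter enqueue
+                 * enq_buf[0..n-1] instead of the original mbuf array.
+                 */
+                for (i = 0; i < nb_mbuf; i++) {
+                        if (i & 1)
+                                enq_buf[n++] = mbuf[i];
+                        else
+                                rte_pktmbuf_free(mbuf[i]);
+                }
+
+                return n;
+        }
+
+        ret = rte_event_eth_rx_adapter_cb_register(id, eth_dev_id,
+                                                   app_rx_cb, NULL);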
index 42dd7f8..f5e5a0b 100644 (file)
@@ -147,6 +147,10 @@ struct rte_event_eth_rx_adapter {
 struct eth_device_info {
        struct rte_eth_dev *dev;
        struct eth_rx_queue_info *rx_queue;
+       /* Rx callback */
+       rte_event_eth_rx_adapter_cb_fn cb_fn;
+       /* Rx callback argument */
+       void *cb_arg;
        /* Set if ethdev->eventdev packet transfer uses a
         * hardware mechanism
         */
@@ -759,11 +763,12 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
                uint16_t num)
 {
        uint32_t i;
-       struct eth_device_info *eth_device_info =
+       struct eth_device_info *dev_info =
                                        &rx_adapter->eth_devices[eth_dev_id];
        struct eth_rx_queue_info *eth_rx_queue_info =
-                                       &eth_device_info->rx_queue[rx_queue_id];
-
+                                       &dev_info->rx_queue[rx_queue_id];
+       struct rte_eth_event_enqueue_buffer *buf =
+                                       &rx_adapter->event_enqueue_buffer;
        int32_t qid = eth_rx_queue_info->event_queue_id;
        uint8_t sched_type = eth_rx_queue_info->sched_type;
        uint8_t priority = eth_rx_queue_info->priority;
@@ -774,6 +779,8 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
        uint32_t rss;
        int do_rss;
        uint64_t ts;
+       struct rte_mbuf *cb_mbufs[BATCH_SIZE];
+       uint16_t nb_cb;
 
        /* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */
        rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);
@@ -789,6 +796,19 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
                }
        }
 
+
+       nb_cb = dev_info->cb_fn ? dev_info->cb_fn(eth_dev_id, rx_queue_id,
+                                               ETH_EVENT_BUFFER_SIZE,
+                                               buf->count, mbufs,
+                                               num,
+                                               dev_info->cb_arg,
+                                               cb_mbufs) :
+                                               num;
+       if (nb_cb < num) {
+               mbufs = cb_mbufs;
+               num = nb_cb;
+       }
+
        for (i = 0; i < num; i++) {
                m = mbufs[i];
                struct rte_event *ev = &events[i];
@@ -2364,3 +2384,47 @@ rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 
        return rx_adapter->service_inited ? 0 : -ESRCH;
 }
+
+int rte_event_eth_rx_adapter_cb_register(uint8_t id,
+                                       uint16_t eth_dev_id,
+                                       rte_event_eth_rx_adapter_cb_fn cb_fn,
+                                       void *cb_arg)
+{
+       struct rte_event_eth_rx_adapter *rx_adapter;
+       struct eth_device_info *dev_info;
+       uint32_t cap;
+       int ret;
+
+       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+       rx_adapter = rxa_id_to_adapter(id);
+       if (rx_adapter == NULL)
+               return -EINVAL;
+
+       dev_info = &rx_adapter->eth_devices[eth_dev_id];
+       if (dev_info->rx_queue == NULL)
+               return -EINVAL;
+
+       ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
+                                               eth_dev_id,
+                                               &cap);
+       if (ret) {
+               RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
+                       " eth port %" PRIu16, id, eth_dev_id);
+               return ret;
+       }
+
+       if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
+               RTE_EDEV_LOG_ERR("Rx callback not supported for eth port %"
+                               PRIu16, eth_dev_id);
+               return -EINVAL;
+       }
+
+       rte_spinlock_lock(&rx_adapter->rx_lock);
+       dev_info->cb_fn = cb_fn;
+       dev_info->cb_arg = cb_arg;
+       rte_spinlock_unlock(&rx_adapter->rx_lock);
+
+       return 0;
+}
index 97f25e9..332ee21 100644 (file)
  * rte_event_eth_rx_adapter_service_id_get() function can be used to retrieve
  * the service function ID of the adapter in this case.
  *
+ * For SW-based packet transfers, i.e., when the
+ * RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT capability is not set in the
+ * adapter's capabilities flags for a particular Ethernet device, the service
+ * function temporarily enqueues mbufs to an event buffer before batch
+ * enqueueing these to the event device. If the buffer fills up, the service
+ * function stops dequeueing packets from the Ethernet device. The application
+ * may want to monitor the buffer fill level and instruct the service function
+ * to selectively enqueue packets to the event device. The application may
+ * also use some other criteria to decide which packets should enter the
+ * event device even when the event buffer fill level is low. The
+ * rte_event_eth_rx_adapter_cb_register() function allows the
+ * application to register a callback that selects which packets to enqueue
+ * to the event device.
+ *
  * Note:
  * 1) Devices created after an instance of rte_event_eth_rx_adapter_create
  *  should be added to a new instance of the rx adapter.
@@ -202,6 +216,47 @@ struct rte_event_eth_rx_adapter_stats {
        /**< Received packet count for interrupt mode Rx queues */
 };
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Callback function invoked by the SW adapter before it continues
+ * to process packets. The callback is passed the size of the enqueue
+ * buffer in the SW adapter and the occupancy of the buffer. The
+ * callback can use these values to decide which mbufs should be
+ * enqueued to the event device. If the return value of the callback
+ * is less than nb_mbuf, the SW adapter enqueues the mbufs that the
+ * callback has placed in enq_buf[], using the return value as the count.
+ *
+ * @param eth_dev_id
+ *  Port identifier of the Ethernet device.
+ * @param queue_id
+ *  Receive queue index.
+ * @param enqueue_buf_size
+ *  Total enqueue buffer size.
+ * @param enqueue_buf_count
+ *  mbuf count in the enqueue buffer.
+ * @param mbuf
+ *  Array of mbufs received from the Ethernet device.
+ * @param nb_mbuf
+ *  Number of mbufs in the array.
+ * @param cb_arg
+ *  Callback argument supplied at registration.
+ * @param[out] enq_buf
+ *  The adapter enqueues enq_buf[] if the return value of the
+ *  callback is less than nb_mbuf.
+ * @return
+ *  The number of mbufs to be enqueued to the event device.
+ */
+typedef uint16_t (*rte_event_eth_rx_adapter_cb_fn)(uint16_t eth_dev_id,
+                                               uint16_t queue_id,
+                                               uint32_t enqueue_buf_size,
+                                               uint32_t enqueue_buf_count,
+                                               struct rte_mbuf **mbuf,
+                                               uint16_t nb_mbuf,
+                                               void *cb_arg,
+                                               struct rte_mbuf **enq_buf);
+
 /**
  * @warning
  * @b EXPERIMENTAL: this API may change without prior notice
@@ -426,6 +481,32 @@ int rte_event_eth_rx_adapter_stats_reset(uint8_t id);
  */
 int rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id);
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Register a callback to process Rx packets; this is supported only for
+ * SW-based packet transfers.
+ * @see rte_event_eth_rx_adapter_cb_fn
+ *
+ * @param id
+ *  Adapter identifier.
+ * @param eth_dev_id
+ *  Port identifier of Ethernet device.
+ * @param cb_fn
+ *  Callback function.
+ * @param cb_arg
+ *  Callback argument.
+ * @return
+ *  - 0: Success
+ *  - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_eth_rx_adapter_cb_register(uint8_t id,
+                               uint16_t eth_dev_id,
+                               rte_event_eth_rx_adapter_cb_fn cb_fn,
+                               void *cb_arg);
+
 #ifdef __cplusplus
 }
 #endif
index c3f18d6..12835e9 100644 (file)
@@ -83,6 +83,19 @@ DPDK_18.05 {
 EXPERIMENTAL {
        global:
 
+       rte_event_crypto_adapter_caps_get;
+       rte_event_crypto_adapter_create;
+       rte_event_crypto_adapter_create_ext;
+       rte_event_crypto_adapter_event_port_get;
+       rte_event_crypto_adapter_free;
+       rte_event_crypto_adapter_queue_pair_add;
+       rte_event_crypto_adapter_queue_pair_del;
+       rte_event_crypto_adapter_service_id_get;
+       rte_event_crypto_adapter_start;
+       rte_event_crypto_adapter_stats_get;
+       rte_event_crypto_adapter_stats_reset;
+       rte_event_crypto_adapter_stop;
+       rte_event_eth_rx_adapter_cb_register;
        rte_event_timer_adapter_caps_get;
        rte_event_timer_adapter_create;
        rte_event_timer_adapter_create_ext;
@@ -97,16 +110,4 @@ EXPERIMENTAL {
        rte_event_timer_arm_burst;
        rte_event_timer_arm_tmo_tick_burst;
        rte_event_timer_cancel_burst;
-       rte_event_crypto_adapter_caps_get;
-       rte_event_crypto_adapter_create;
-       rte_event_crypto_adapter_create_ext;
-       rte_event_crypto_adapter_event_port_get;
-       rte_event_crypto_adapter_free;
-       rte_event_crypto_adapter_queue_pair_add;
-       rte_event_crypto_adapter_queue_pair_del;
-       rte_event_crypto_adapter_service_id_get;
-       rte_event_crypto_adapter_start;
-       rte_event_crypto_adapter_stats_get;
-       rte_event_crypto_adapter_stats_reset;
-       rte_event_crypto_adapter_stop;
 };