lib: remove unneeded header includes
dpdk.git: lib/eventdev/rte_event_eth_rx_adapter.c
index b3d9416..ae1e260 100644
 #include <rte_service_component.h>
 #include <rte_thash.h>
 #include <rte_interrupts.h>
+#include <rte_mbuf_dyn.h>
+#include <rte_telemetry.h>
 
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 #include "rte_event_eth_rx_adapter.h"
 
 #define BATCH_SIZE             32
@@ -40,6 +42,8 @@
 /* Sentinel value to detect an uninitialized file handle */
 #define INIT_FD                -1
 
+#define RXA_ADAPTER_ARRAY "rte_event_eth_rx_adapter_array"
+
 /*
  * Used to store port and queue ID of interrupting Rx queue
  */
@@ -78,11 +82,13 @@ struct eth_rx_vector_data {
 TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
 
 /* Instance per adapter */
-struct rte_eth_event_enqueue_buffer {
+struct eth_event_enqueue_buffer {
        /* Count of events in this buffer */
        uint16_t count;
        /* Array of events in this buffer */
-       struct rte_event events[ETH_EVENT_BUFFER_SIZE];
+       struct rte_event *events;
+       /* Size of the event buffer */
+       uint16_t events_size;
        /* Event enqueue happens from head */
        uint16_t head;
        /* New packets from rte_eth_rx_burst are enqueued from the tail */
@@ -92,15 +98,17 @@ struct rte_eth_event_enqueue_buffer {
        uint16_t last_mask;
 };
 
-struct rte_event_eth_rx_adapter {
+struct event_eth_rx_adapter {
        /* RSS key */
        uint8_t rss_key_be[RSS_KEY_SIZE];
        /* Event device identifier */
        uint8_t eventdev_id;
-       /* Per ethernet device structure */
-       struct eth_device_info *eth_devices;
        /* Event port identifier */
        uint8_t event_port_id;
+       /* Flag indicating per rxq event buffer */
+       bool use_queue_event_buf;
+       /* Per ethernet device structure */
+       struct eth_device_info *eth_devices;
        /* Lock to serialize config updates with service function */
        rte_spinlock_t rx_lock;
        /* Max mbufs processed in any service function invocation */
@@ -116,7 +124,7 @@ struct rte_event_eth_rx_adapter {
        /* Next entry in wrr[] to begin polling */
        uint32_t wrr_pos;
        /* Event burst buffer */
-       struct rte_eth_event_enqueue_buffer event_enqueue_buffer;
+       struct eth_event_enqueue_buffer event_enqueue_buffer;
        /* Vector enable flag */
        uint8_t ena_vector;
        /* Timestamp of previous vector expiry list traversal */
@@ -236,9 +244,25 @@ struct eth_rx_queue_info {
        uint32_t flow_id_mask;  /* Set to ~0 if app provides flow id else 0 */
        uint64_t event;
        struct eth_rx_vector_data vector_data;
+       struct eth_event_enqueue_buffer *event_buf;
+       /* use adapter stats struct for queue level stats,
+        * as the same stats need to be updated for both adapter and queue
+        */
+       struct rte_event_eth_rx_adapter_stats *stats;
 };
 
-static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
+static struct event_eth_rx_adapter **event_eth_rx_adapter;
+
+/* Enable dynamic timestamp field in mbuf */
+static uint64_t event_eth_rx_timestamp_dynflag;
+static int event_eth_rx_timestamp_dynfield_offset = -1;
+
+static inline rte_mbuf_timestamp_t *
+rxa_timestamp_dynfield(struct rte_mbuf *mbuf)
+{
+       return RTE_MBUF_DYNFIELD(mbuf,
+               event_eth_rx_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
+}
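
A minimal sketch of the dynamic timestamp plumbing introduced above, for reference: rte_mbuf_dyn_rx_timestamp_register() publishes a field offset and an ol_flags bit once per process, and RTE_MBUF_DYNFIELD() turns that offset into a typed pointer. The setup()/read_ts() names are illustrative, not part of this file.

	#include <rte_mbuf.h>
	#include <rte_mbuf_dyn.h>

	static int ts_off = -1;   /* byte offset of the field inside rte_mbuf */
	static uint64_t ts_flag;  /* ol_flags bit: "this mbuf was timestamped" */

	static int
	setup(void)
	{
		/* registers (or looks up) the shared Rx timestamp field/flag */
		return rte_mbuf_dyn_rx_timestamp_register(&ts_off, &ts_flag);
	}

	static rte_mbuf_timestamp_t
	read_ts(struct rte_mbuf *m)
	{
		if (!(m->ol_flags & ts_flag))
			return 0; /* the driver did not stamp this mbuf */
		return *RTE_MBUF_DYNFIELD(m, ts_off, rte_mbuf_timestamp_t *);
	}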
 
 static inline int
 rxa_validate_id(uint8_t id)
@@ -246,6 +270,22 @@ rxa_validate_id(uint8_t id)
        return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
 }
 
+static inline struct eth_event_enqueue_buffer *
+rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+                 uint16_t rx_queue_id,
+                 struct rte_event_eth_rx_adapter_stats **stats)
+{
+       if (rx_adapter->use_queue_event_buf) {
+               struct eth_device_info *dev_info =
+                       &rx_adapter->eth_devices[eth_dev_id];
+               *stats = dev_info->rx_queue[rx_queue_id].stats;
+               return dev_info->rx_queue[rx_queue_id].event_buf;
+       } else {
+               *stats = &rx_adapter->stats;
+               return &rx_adapter->event_enqueue_buffer;
+       }
+}
+
 #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
        if (!rxa_validate_id(id)) { \
                RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
@@ -254,7 +294,7 @@ rxa_validate_id(uint8_t id)
 } while (0)
 
 static inline int
-rxa_sw_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
 {
        return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
 }
@@ -272,10 +312,9 @@ static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
  * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
  */
 static int
-rxa_wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
-        unsigned int n, int *cw,
-        struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
-        uint16_t gcd, int prev)
+rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw,
+            struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
+            uint16_t gcd, int prev)
 {
        int i = prev;
        uint16_t w;
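
The hunk above only reflows the signature; the loop body (not shown) follows the interleaved weighted round-robin scheme from the page cited in the comment. A hedged, self-contained sketch of that scheme, with a plain weights[] array standing in for the poll-entry weights (assumes at least one nonzero weight):

	static int
	wrr_next_sketch(const uint16_t *weights, unsigned int n, int *cw,
			uint16_t max_wt, uint16_t gcd, int prev)
	{
		int i = prev;

		while (1) {
			i = (i + 1) % n;
			if (i == 0) {
				*cw -= gcd;          /* step the current weight down */
				if (*cw <= 0)
					*cw = max_wt; /* then wrap back to the max */
			}
			if (weights[i] >= *cw)
				return i;            /* this entry is due for a poll */
		}
	}

Higher-weight entries satisfy weights[i] >= *cw for more values of *cw, so they are selected proportionally more often while still being interleaved with the rest.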
@@ -380,10 +419,9 @@ rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
 /* Calculate nb_rx_intr after deleting interrupt mode rx queues
  */
 static void
-rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
-                       struct eth_device_info *dev_info,
-                       int rx_queue_id,
-                       uint32_t *nb_rx_intr)
+rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter,
+                         struct eth_device_info *dev_info, int rx_queue_id,
+                         uint32_t *nb_rx_intr)
 {
        uint32_t intr_diff;
 
@@ -399,12 +437,10 @@ rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
  * interrupt queues could currently be poll mode Rx queues
  */
 static void
-rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-                       struct eth_device_info *dev_info,
-                       int rx_queue_id,
-                       uint32_t *nb_rx_poll,
-                       uint32_t *nb_rx_intr,
-                       uint32_t *nb_wrr)
+rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter,
+                         struct eth_device_info *dev_info, int rx_queue_id,
+                         uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+                         uint32_t *nb_wrr)
 {
        uint32_t intr_diff;
        uint32_t poll_diff;
@@ -431,11 +467,9 @@ rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
  * after deleting poll mode rx queues
  */
 static void
-rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
-                       struct eth_device_info *dev_info,
-                       int rx_queue_id,
-                       uint32_t *nb_rx_poll,
-                       uint32_t *nb_wrr)
+rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter,
+                         struct eth_device_info *dev_info, int rx_queue_id,
+                         uint32_t *nb_rx_poll, uint32_t *nb_wrr)
 {
        uint32_t poll_diff;
        uint32_t wrr_len_diff;
@@ -456,13 +490,10 @@ rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
 /* Calculate nb_rx_* after adding poll mode rx queues
  */
 static void
-rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
-                       struct eth_device_info *dev_info,
-                       int rx_queue_id,
-                       uint16_t wt,
-                       uint32_t *nb_rx_poll,
-                       uint32_t *nb_rx_intr,
-                       uint32_t *nb_wrr)
+rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter,
+                         struct eth_device_info *dev_info, int rx_queue_id,
+                         uint16_t wt, uint32_t *nb_rx_poll,
+                         uint32_t *nb_rx_intr, uint32_t *nb_wrr)
 {
        uint32_t intr_diff;
        uint32_t poll_diff;
@@ -489,13 +520,10 @@ rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Calculate nb_rx_* after adding rx_queue_id */
 static void
-rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
-               struct eth_device_info *dev_info,
-               int rx_queue_id,
-               uint16_t wt,
-               uint32_t *nb_rx_poll,
-               uint32_t *nb_rx_intr,
-               uint32_t *nb_wrr)
+rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter,
+                    struct eth_device_info *dev_info, int rx_queue_id,
+                    uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+                    uint32_t *nb_wrr)
 {
        if (wt != 0)
                rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
@@ -507,12 +535,10 @@ rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Calculate nb_rx_* after deleting rx_queue_id */
 static void
-rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
-               struct eth_device_info *dev_info,
-               int rx_queue_id,
-               uint32_t *nb_rx_poll,
-               uint32_t *nb_rx_intr,
-               uint32_t *nb_wrr)
+rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter,
+                    struct eth_device_info *dev_info, int rx_queue_id,
+                    uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+                    uint32_t *nb_wrr)
 {
        rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
                                nb_wrr);
@@ -524,8 +550,7 @@ rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
  * Allocate the rx_poll array
  */
 static struct eth_rx_poll_entry *
-rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
-       uint32_t num_rx_polled)
+rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled)
 {
        size_t len;
 
@@ -541,7 +566,7 @@ rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
  * Allocate the WRR array
  */
 static uint32_t *
-rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
+rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr)
 {
        size_t len;
 
@@ -554,11 +579,9 @@ rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
 }
 
 static int
-rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
-               uint32_t nb_poll,
-               uint32_t nb_wrr,
-               struct eth_rx_poll_entry **rx_poll,
-               uint32_t **wrr_sched)
+rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll,
+                     uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll,
+                     uint32_t **wrr_sched)
 {
 
        if (nb_poll == 0) {
@@ -583,9 +606,8 @@ rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Precalculate WRR polling sequence for all queues in rx_adapter */
 static void
-rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter,
-               struct eth_rx_poll_entry *rx_poll,
-               uint32_t *rx_wrr)
+rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter,
+                     struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr)
 {
        uint16_t d;
        uint16_t q;
@@ -712,13 +734,13 @@ rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
 }
 
 static inline int
-rxa_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter)
 {
        return !!rx_adapter->enq_block_count;
 }
 
 static inline void
-rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter)
 {
        if (rx_adapter->rx_enq_block_start_ts)
                return;
@@ -731,8 +753,8 @@ rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static inline void
-rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
-                   struct rte_event_eth_rx_adapter_stats *stats)
+rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
+                    struct rte_event_eth_rx_adapter_stats *stats)
 {
        if (unlikely(!stats->rx_enq_start_ts))
                stats->rx_enq_start_ts = rte_get_tsc_cycles();
@@ -751,24 +773,29 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Enqueue buffered events to event device */
 static inline uint16_t
-rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter,
+                      struct eth_event_enqueue_buffer *buf,
+                      struct rte_event_eth_rx_adapter_stats *stats)
 {
-       struct rte_eth_event_enqueue_buffer *buf =
-           &rx_adapter->event_enqueue_buffer;
-       struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
-       uint16_t count = buf->last ? buf->last - buf->head : buf->count;
+       uint16_t count = buf->count;
+       uint16_t n = 0;
 
        if (!count)
                return 0;
 
-       uint16_t n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
-                                       rx_adapter->event_port_id,
-                                       &buf->events[buf->head],
-                                       count);
-       if (n != count)
-               stats->rx_enq_retry++;
+       if (buf->last)
+               count = buf->last - buf->head;
 
-       buf->head += n;
+       if (count) {
+               n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
+                                               rx_adapter->event_port_id,
+                                               &buf->events[buf->head],
+                                               count);
+               if (n != count)
+                       stats->rx_enq_retry++;
+
+               buf->head += n;
+       }
 
        if (buf->last && n == count) {
                uint16_t n1;
@@ -797,7 +824,7 @@ rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
 }
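
The wrap handling is easier to see on a simplified ring: buf->last is nonzero only after the producer wrapped, and then marks where valid data ends at the top of the array. A hedged sketch of the flush order (enqueue_burst() stands in for rte_event_enqueue_new_burst(); count bookkeeping is elided):

	/* drain the top chunk [head, last) first, else [head, head+count) */
	count = buf->last ? buf->last - buf->head : buf->count;
	n = enqueue_burst(&buf->events[buf->head], count);
	buf->head += n;
	if (buf->last && n == count) {
		/* top chunk fully drained: unwrap and continue from index 0 */
		uint16_t n1 = enqueue_burst(&buf->events[0], buf->tail);
		buf->head = n1;
		buf->last = 0;
		n += n1;
	}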
 
 static inline void
-rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_init_vector(struct event_eth_rx_adapter *rx_adapter,
                struct eth_rx_vector_data *vec)
 {
        vec->vector_ev->nb_elem = 0;
@@ -808,9 +835,9 @@ rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline uint16_t
-rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
                        struct eth_rx_queue_info *queue_info,
-                       struct rte_eth_event_enqueue_buffer *buf,
+                       struct eth_event_enqueue_buffer *buf,
                        struct rte_mbuf **mbufs, uint16_t num)
 {
        struct rte_event *ev = &buf->events[buf->count];
@@ -868,19 +895,16 @@ rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline void
-rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
-               uint16_t eth_dev_id,
-               uint16_t rx_queue_id,
-               struct rte_mbuf **mbufs,
-               uint16_t num)
+rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+                uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num,
+                struct eth_event_enqueue_buffer *buf,
+                struct rte_event_eth_rx_adapter_stats *stats)
 {
        uint32_t i;
        struct eth_device_info *dev_info =
                                        &rx_adapter->eth_devices[eth_dev_id];
        struct eth_rx_queue_info *eth_rx_queue_info =
                                        &dev_info->rx_queue[rx_queue_id];
-       struct rte_eth_event_enqueue_buffer *buf =
-                                       &rx_adapter->event_enqueue_buffer;
        uint16_t new_tail = buf->tail;
        uint64_t event = eth_rx_queue_info->event;
        uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
@@ -890,15 +914,28 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
        int do_rss;
        uint16_t nb_cb;
        uint16_t dropped;
+       uint64_t ts, ts_mask;
 
        if (!eth_rx_queue_info->ena_vector) {
-               /* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */
-               rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);
+               ts = m->ol_flags & event_eth_rx_timestamp_dynflag ?
+                                               0 : rte_get_tsc_cycles();
+
+               /* 0xffff ffff ffff ffff if RTE_MBUF_F_RX_TIMESTAMP is set,
+                * otherwise 0
+                */
+               ts_mask = (uint64_t)(!(m->ol_flags &
+                                      event_eth_rx_timestamp_dynflag)) - 1ULL;
+
+               /* 0xffff ffff if RTE_MBUF_F_RX_RSS_HASH is set, otherwise 0 */
+               rss_mask = ~(((m->ol_flags & RTE_MBUF_F_RX_RSS_HASH) != 0) - 1);
                do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
                for (i = 0; i < num; i++) {
                        struct rte_event *ev;
 
                        m = mbufs[i];
+                       *rxa_timestamp_dynfield(m) = ts |
+                                       (*rxa_timestamp_dynfield(m) & ts_mask);
+
                        ev = &buf->events[new_tail];
 
                        rss = do_rss ? rxa_do_softrss(m, rx_adapter->rss_key_be)
@@ -919,7 +956,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
                dropped = 0;
                nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id,
                                       buf->last |
-                                      (RTE_DIM(buf->events) & ~buf->last_mask),
+                                      (buf->events_size & ~buf->last_mask),
                                       buf->count >= BATCH_SIZE ?
                                                buf->count - BATCH_SIZE : 0,
                                       &buf->events[buf->tail],
@@ -932,7 +969,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
                else
                        num = nb_cb;
                if (dropped)
-                       rx_adapter->stats.rx_dropped += dropped;
+                       stats->rx_dropped += dropped;
        }
 
        buf->count += num;
@@ -940,12 +977,12 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
 }
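
Both masks in rxa_buffer_mbufs() above use the same branchless idiom: a boolean collapses into an all-ones or all-zero mask via subtraction, avoiding a per-packet branch. Worked out for each case:

	/* flag set:   (x != 0) == 1  ->  1 - 1 == 0         ->  ~0 == all ones
	 * flag clear: (x != 0) == 0  ->  0 - 1 == all ones  ->  ~(all ones) == 0
	 */
	rss_mask = ~(((m->ol_flags & RTE_MBUF_F_RX_RSS_HASH) != 0) - 1);

	/* ts_mask flips the sense (note the '!'): all ones when the timestamp
	 * flag IS set, so the driver-written timestamp survives the OR with
	 * ts == 0; when the flag is clear, ts_mask == 0 and the TSC fallback
	 * held in 'ts' wins.
	 */
	ts_mask = (uint64_t)(!(m->ol_flags & event_eth_rx_timestamp_dynflag)) - 1ULL;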
 
 static inline bool
-rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf)
+rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf)
 {
        uint32_t nb_req = buf->tail + BATCH_SIZE;
 
        if (!buf->last) {
-               if (nb_req <= RTE_DIM(buf->events))
+               if (nb_req <= buf->events_size)
                        return true;
 
                if (buf->head >= BATCH_SIZE) {
@@ -961,20 +998,15 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf)
 
 /* Enqueue packets from  <port, q>  to event buffer */
 static inline uint32_t
-rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
-       uint16_t port_id,
-       uint16_t queue_id,
-       uint32_t rx_count,
-       uint32_t max_rx,
-       int *rxq_empty)
+rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
+          uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
+          int *rxq_empty, struct eth_event_enqueue_buffer *buf,
+          struct rte_event_eth_rx_adapter_stats *stats)
 {
        struct rte_mbuf *mbufs[BATCH_SIZE];
-       struct rte_eth_event_enqueue_buffer *buf =
-                                       &rx_adapter->event_enqueue_buffer;
-       struct rte_event_eth_rx_adapter_stats *stats =
-                                       &rx_adapter->stats;
        uint16_t n;
        uint32_t nb_rx = 0;
+       uint32_t nb_flushed = 0;
 
        if (rxq_empty)
                *rxq_empty = 0;
@@ -983,7 +1015,8 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
         */
        while (rxa_pkt_buf_available(buf)) {
                if (buf->count >= BATCH_SIZE)
-                       rxa_flush_event_buffer(rx_adapter);
+                       nb_flushed +=
+                               rxa_flush_event_buffer(rx_adapter, buf, stats);
 
                stats->rx_poll_count++;
                n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
@@ -992,21 +1025,26 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
                                *rxq_empty = 1;
                        break;
                }
-               rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n);
+               rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf,
+                                stats);
                nb_rx += n;
                if (rx_count + nb_rx > max_rx)
                        break;
        }
 
        if (buf->count > 0)
-               rxa_flush_event_buffer(rx_adapter);
+               nb_flushed += rxa_flush_event_buffer(rx_adapter, buf, stats);
+
+       stats->rx_packets += nb_rx;
+       if (nb_flushed == 0)
+               rte_event_maintain(rx_adapter->eventdev_id,
+                                  rx_adapter->event_port_id, 0);
 
        return nb_rx;
 }
 
 static inline void
-rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
-               void *data)
+rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data)
 {
        uint16_t port_id;
        uint16_t queue;
@@ -1046,8 +1084,8 @@ rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
-                       uint32_t num_intr_vec)
+rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter,
+                         uint32_t num_intr_vec)
 {
        if (rx_adapter->num_intr_vec + num_intr_vec >
                                RTE_EVENT_ETH_INTR_RING_SIZE) {
@@ -1062,9 +1100,9 @@ rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Delete entries for (dev, queue) from the interrupt ring */
 static void
-rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
-                       struct eth_device_info *dev_info,
-                       uint16_t rx_queue_id)
+rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter,
+                         struct eth_device_info *dev_info,
+                         uint16_t rx_queue_id)
 {
        int i, n;
        union queue_data qd;
@@ -1097,7 +1135,7 @@ rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
 static void *
 rxa_intr_thread(void *arg)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter = arg;
+       struct event_eth_rx_adapter *rx_adapter = arg;
        struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
        int n, i;
 
@@ -1119,28 +1157,30 @@ rxa_intr_thread(void *arg)
 /* Dequeue <port, q> from interrupt ring and enqueue received
  * mbufs to eventdev
  */
-static inline uint32_t
-rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
+static inline void
+rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 {
        uint32_t n;
        uint32_t nb_rx = 0;
        int rxq_empty;
-       struct rte_eth_event_enqueue_buffer *buf;
+       struct eth_event_enqueue_buffer *buf;
+       struct rte_event_eth_rx_adapter_stats *stats;
        rte_spinlock_t *ring_lock;
        uint8_t max_done = 0;
 
        if (rx_adapter->num_rx_intr == 0)
-               return 0;
+               return;
 
        if (rte_ring_count(rx_adapter->intr_ring) == 0
                && !rx_adapter->qd_valid)
-               return 0;
+               return;
 
        buf = &rx_adapter->event_enqueue_buffer;
+       stats = &rx_adapter->stats;
        ring_lock = &rx_adapter->intr_ring_lock;
 
        if (buf->count >= BATCH_SIZE)
-               rxa_flush_event_buffer(rx_adapter);
+               rxa_flush_event_buffer(rx_adapter, buf, stats);
 
        while (rxa_pkt_buf_available(buf)) {
                struct eth_device_info *dev_info;
@@ -1192,7 +1232,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
                                        continue;
                                n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
                                        rx_adapter->max_nb_rx,
-                                       &rxq_empty);
+                                       &rxq_empty, buf, stats);
                                nb_rx += n;
 
                                enq_buffer_full = !rxq_empty && n == 0;
@@ -1213,7 +1253,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
                } else {
                        n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
                                rx_adapter->max_nb_rx,
-                               &rxq_empty);
+                               &rxq_empty, buf, stats);
                        rx_adapter->qd_valid = !rxq_empty;
                        nb_rx += n;
                        if (nb_rx > rx_adapter->max_nb_rx)
@@ -1223,7 +1263,6 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
 
 done:
        rx_adapter->stats.rx_intr_packets += nb_rx;
-       return nb_rx;
 }
 
 /*
@@ -1239,18 +1278,18 @@ done:
  * the hypervisor's switching layer where adjustments can be made to deal with
  * it.
  */
-static inline uint32_t
-rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
+static inline void
+rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 {
        uint32_t num_queue;
        uint32_t nb_rx = 0;
-       struct rte_eth_event_enqueue_buffer *buf;
+       struct eth_event_enqueue_buffer *buf = NULL;
+       struct rte_event_eth_rx_adapter_stats *stats = NULL;
        uint32_t wrr_pos;
        uint32_t max_nb_rx;
 
        wrr_pos = rx_adapter->wrr_pos;
        max_nb_rx = rx_adapter->max_nb_rx;
-       buf = &rx_adapter->event_enqueue_buffer;
 
        /* Iterate through a WRR sequence */
        for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
@@ -1258,40 +1297,48 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
                uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
                uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
 
+               buf = rxa_event_buf_get(rx_adapter, d, qid, &stats);
+
                /* Don't do a batch dequeue from the rx queue if there isn't
                 * enough space in the enqueue buffer.
                 */
                if (buf->count >= BATCH_SIZE)
-                       rxa_flush_event_buffer(rx_adapter);
+                       rxa_flush_event_buffer(rx_adapter, buf, stats);
                if (!rxa_pkt_buf_available(buf)) {
-                       rx_adapter->wrr_pos = wrr_pos;
-                       return nb_rx;
+                       if (rx_adapter->use_queue_event_buf)
+                               goto poll_next_entry;
+                       else {
+                               rx_adapter->wrr_pos = wrr_pos;
+                               return;
+                       }
                }
 
                nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
-                               NULL);
+                               NULL, buf, stats);
                if (nb_rx > max_nb_rx) {
                        rx_adapter->wrr_pos =
                                    (wrr_pos + 1) % rx_adapter->wrr_len;
                        break;
                }
 
+poll_next_entry:
                if (++wrr_pos == rx_adapter->wrr_len)
                        wrr_pos = 0;
        }
-       return nb_rx;
 }
 
 static void
 rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter = arg;
-       struct rte_eth_event_enqueue_buffer *buf =
-               &rx_adapter->event_enqueue_buffer;
+       struct event_eth_rx_adapter *rx_adapter = arg;
+       struct eth_event_enqueue_buffer *buf = NULL;
+       struct rte_event_eth_rx_adapter_stats *stats = NULL;
        struct rte_event *ev;
 
+       buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue, &stats);
+
        if (buf->count)
-               rxa_flush_event_buffer(rx_adapter);
+               rxa_flush_event_buffer(rx_adapter, buf, stats);
 
        if (vec->vector_ev->nb_elem == 0)
                return;
@@ -1309,8 +1356,7 @@ rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 static int
 rxa_service_func(void *args)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter = args;
-       struct rte_event_eth_rx_adapter_stats *stats;
+       struct event_eth_rx_adapter *rx_adapter = args;
 
        if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
                return 0;
@@ -1337,17 +1383,18 @@ rxa_service_func(void *args)
                }
        }
 
-       stats = &rx_adapter->stats;
-       stats->rx_packets += rxa_intr_ring_dequeue(rx_adapter);
-       stats->rx_packets += rxa_poll(rx_adapter);
+       rxa_intr_ring_dequeue(rx_adapter);
+       rxa_poll(rx_adapter);
+
        rte_spinlock_unlock(&rx_adapter->rx_lock);
+
        return 0;
 }
 
 static int
 rte_event_eth_rx_adapter_init(void)
 {
-       const char *name = "rte_event_eth_rx_adapter_array";
+       const char *name = RXA_ADAPTER_ARRAY;
        const struct rte_memzone *mz;
        unsigned int sz;
 
@@ -1370,7 +1417,22 @@ rte_event_eth_rx_adapter_init(void)
        return 0;
 }
 
-static inline struct rte_event_eth_rx_adapter *
+static int
+rxa_memzone_lookup(void)
+{
+       const struct rte_memzone *mz;
+
+       if (event_eth_rx_adapter == NULL) {
+               mz = rte_memzone_lookup(RXA_ADAPTER_ARRAY);
+               if (mz == NULL)
+                       return -ENOMEM;
+               event_eth_rx_adapter = mz->addr;
+       }
+
+       return 0;
+}
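
rxa_memzone_lookup() is what lets a process that never ran the init path (a secondary process serving telemetry, for instance) reach the adapter array the primary created: the array lives in a named memzone, so it can be recovered by name from shared memory. A hedged sketch of the hand-off in isolation (the exact reserve flags used by the init path are not shown in this hunk):

	/* primary: done once in rte_event_eth_rx_adapter_init() */
	mz = rte_memzone_reserve(RXA_ADAPTER_ARRAY, sz, rte_socket_id(), 0);
	event_eth_rx_adapter = mz->addr;

	/* secondary: same mapping, found by name */
	mz = rte_memzone_lookup(RXA_ADAPTER_ARRAY);
	event_eth_rx_adapter = mz ? mz->addr : NULL;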
+
+static inline struct event_eth_rx_adapter *
 rxa_id_to_adapter(uint8_t id)
 {
        return event_eth_rx_adapter ?
@@ -1387,7 +1449,7 @@ rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
        int started;
        uint8_t port_id;
        struct rte_event_port_conf *port_conf = arg;
-       struct rte_event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
+       struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
 
        dev = &rte_eventdevs[rx_adapter->eventdev_id];
        dev_conf = dev->data->dev_conf;
@@ -1436,7 +1498,7 @@ rxa_epoll_create1(void)
 }
 
 static int
-rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_init_epd(struct event_eth_rx_adapter *rx_adapter)
 {
        if (rx_adapter->epd != INIT_FD)
                return 0;
@@ -1453,7 +1515,7 @@ rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
 {
        int err;
        char thread_name[RTE_MAX_THREAD_NAME_LEN];
@@ -1497,7 +1559,7 @@ error:
 }
 
 static int
-rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
 {
        int err;
 
@@ -1518,7 +1580,7 @@ rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter)
 {
        int ret;
 
@@ -1536,9 +1598,8 @@ rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-       struct eth_device_info *dev_info,
-       uint16_t rx_queue_id)
+rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter,
+                struct eth_device_info *dev_info, uint16_t rx_queue_id)
 {
        int err;
        uint16_t eth_dev_id = dev_info->dev->data->port_id;
@@ -1566,9 +1627,8 @@ rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-               struct eth_device_info *dev_info,
-               int rx_queue_id)
+rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
+                  struct eth_device_info *dev_info, int rx_queue_id)
 {
        int err;
        int i;
@@ -1625,9 +1685,8 @@ rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-       struct eth_device_info *dev_info,
-       uint16_t rx_queue_id)
+rxa_config_intr(struct event_eth_rx_adapter *rx_adapter,
+               struct eth_device_info *dev_info, uint16_t rx_queue_id)
 {
        int err, err1;
        uint16_t eth_dev_id = dev_info->dev->data->port_id;
@@ -1715,9 +1774,8 @@ err_free_queue:
 }
 
 static int
-rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-       struct eth_device_info *dev_info,
-       int rx_queue_id)
+rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
+                  struct eth_device_info *dev_info, int rx_queue_id)
 
 {
        int i, j, err;
@@ -1765,9 +1823,8 @@ rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
        return err;
 }
 
-
 static int
-rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
+rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
 {
        int ret;
        struct rte_service_spec service;
@@ -1810,10 +1867,9 @@ err_done:
 }
 
 static void
-rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-               struct eth_device_info *dev_info,
-               int32_t rx_queue_id,
-               uint8_t add)
+rxa_update_queue(struct event_eth_rx_adapter *rx_adapter,
+                struct eth_device_info *dev_info, int32_t rx_queue_id,
+                uint8_t add)
 {
        struct eth_rx_queue_info *queue_info;
        int enabled;
@@ -1863,9 +1919,8 @@ rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
 }
 
 static void
-rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
-       struct eth_device_info *dev_info,
-       int32_t rx_queue_id)
+rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
+          struct eth_device_info *dev_info, int32_t rx_queue_id)
 {
        struct eth_rx_vector_data *vec;
        int pollq;
@@ -1903,13 +1958,23 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
        rx_adapter->num_rx_intr -= intrq;
        dev_info->nb_rx_intr -= intrq;
        dev_info->nb_shared_intr -= intrq && sintrq;
+       if (rx_adapter->use_queue_event_buf) {
+               struct eth_event_enqueue_buffer *event_buf =
+                       dev_info->rx_queue[rx_queue_id].event_buf;
+               struct rte_event_eth_rx_adapter_stats *stats =
+                       dev_info->rx_queue[rx_queue_id].stats;
+               rte_free(event_buf->events);
+               rte_free(event_buf);
+               rte_free(stats);
+               dev_info->rx_queue[rx_queue_id].event_buf = NULL;
+               dev_info->rx_queue[rx_queue_id].stats = NULL;
+       }
 }
 
-static void
-rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-       struct eth_device_info *dev_info,
-       int32_t rx_queue_id,
-       const struct rte_event_eth_rx_adapter_queue_conf *conf)
+static int
+rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
+             struct eth_device_info *dev_info, int32_t rx_queue_id,
+             const struct rte_event_eth_rx_adapter_queue_conf *conf)
 {
        struct eth_rx_queue_info *queue_info;
        const struct rte_event *ev = &conf->ev;
@@ -1917,15 +1982,22 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
        int intrq;
        int sintrq;
        struct rte_event *qi_ev;
+       struct eth_event_enqueue_buffer *new_rx_buf = NULL;
+       struct rte_event_eth_rx_adapter_stats *stats = NULL;
+       uint16_t eth_dev_id = dev_info->dev->data->port_id;
+       int ret;
 
        if (rx_queue_id == -1) {
                uint16_t nb_rx_queues;
                uint16_t i;
 
                nb_rx_queues = dev_info->dev->data->nb_rx_queues;
-               for (i = 0; i < nb_rx_queues; i++)
-                       rxa_add_queue(rx_adapter, dev_info, i, conf);
-               return;
+               for (i = 0; i < nb_rx_queues; i++) {
+                       ret = rxa_add_queue(rx_adapter, dev_info, i, conf);
+                       if (ret)
+                               return ret;
+               }
+               return 0;
        }
 
        pollq = rxa_polled_queue(dev_info, rx_queue_id);
@@ -1947,6 +2019,24 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
        } else
                qi_ev->flow_id = 0;
 
+       if (conf->rx_queue_flags &
+           RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
+               queue_info->ena_vector = 1;
+               qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
+               rxa_set_vector_data(queue_info, conf->vector_sz,
+                                   conf->vector_timeout_ns, conf->vector_mp,
+                                   rx_queue_id, dev_info->dev->data->port_id);
+               rx_adapter->ena_vector = 1;
+               rx_adapter->vector_tmo_ticks =
+                       rx_adapter->vector_tmo_ticks ?
+                                     RTE_MIN(queue_info->vector_data
+                                                       .vector_timeout_ticks >>
+                                               1,
+                                       rx_adapter->vector_tmo_ticks) :
+                               queue_info->vector_data.vector_timeout_ticks >>
+                                       1;
+       }
+
        rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
        if (rxa_polled_queue(dev_info, rx_queue_id)) {
                rx_adapter->num_rx_polled += !pollq;
@@ -1970,48 +2060,58 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
                                dev_info->next_q_idx = 0;
                }
        }
-}
 
-static void
-rxa_sw_event_vector_configure(
-       struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
-       int rx_queue_id,
-       const struct rte_event_eth_rx_adapter_event_vector_config *config)
-{
-       struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
-       struct eth_rx_queue_info *queue_info;
-       struct rte_event *qi_ev;
+       if (!rx_adapter->use_queue_event_buf)
+               return 0;
 
-       if (rx_queue_id == -1) {
-               uint16_t nb_rx_queues;
-               uint16_t i;
+       new_rx_buf = rte_zmalloc_socket("rx_buffer_meta",
+                               sizeof(*new_rx_buf), 0,
+                               rte_eth_dev_socket_id(eth_dev_id));
+       if (new_rx_buf == NULL) {
+               RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for "
+                                "dev_id: %d queue_id: %d",
+                                eth_dev_id, rx_queue_id);
+               return -ENOMEM;
+       }
 
-               nb_rx_queues = dev_info->dev->data->nb_rx_queues;
-               for (i = 0; i < nb_rx_queues; i++)
-                       rxa_sw_event_vector_configure(rx_adapter, eth_dev_id, i,
-                                                     config);
-               return;
+       new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE);
+       new_rx_buf->events_size += (2 * BATCH_SIZE);
+       new_rx_buf->events = rte_zmalloc_socket("rx_buffer",
+                               sizeof(struct rte_event) *
+                               new_rx_buf->events_size, 0,
+                               rte_eth_dev_socket_id(eth_dev_id));
+       if (new_rx_buf->events == NULL) {
+               rte_free(new_rx_buf);
+               RTE_EDEV_LOG_ERR("Failed to allocate event buffer for "
+                                "dev_id: %d queue_id: %d",
+                                eth_dev_id, rx_queue_id);
+               return -ENOMEM;
        }
 
-       queue_info = &dev_info->rx_queue[rx_queue_id];
-       qi_ev = (struct rte_event *)&queue_info->event;
-       queue_info->ena_vector = 1;
-       qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
-       rxa_set_vector_data(queue_info, config->vector_sz,
-                           config->vector_timeout_ns, config->vector_mp,
-                           rx_queue_id, dev_info->dev->data->port_id);
-       rx_adapter->ena_vector = 1;
-       rx_adapter->vector_tmo_ticks =
-               rx_adapter->vector_tmo_ticks ?
-                             RTE_MIN(config->vector_timeout_ns >> 1,
-                               rx_adapter->vector_tmo_ticks) :
-                             config->vector_timeout_ns >> 1;
-}
-
-static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
-               uint16_t eth_dev_id,
-               int rx_queue_id,
-               const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+       queue_info->event_buf = new_rx_buf;
+
+       /* Allocate storage for adapter queue stats */
+       stats = rte_zmalloc_socket("rx_queue_stats",
+                               sizeof(*stats), 0,
+                               rte_eth_dev_socket_id(eth_dev_id));
+       if (stats == NULL) {
+               rte_free(new_rx_buf->events);
+               rte_free(new_rx_buf);
+               RTE_EDEV_LOG_ERR("Failed to allocate stats storage for"
+                                " dev_id: %d queue_id: %d",
+                                eth_dev_id, rx_queue_id);
+               return -ENOMEM;
+       }
+
+       queue_info->stats = stats;
+
+       return 0;
+}
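
The sizing above pads the requested per-queue size up to a BATCH_SIZE multiple and then adds two more batches of headroom, so a full rte_eth_rx_burst() of BATCH_SIZE packets always fits without forcing an immediate wrap. Worked example: with conf->event_buf_size = 100 and BATCH_SIZE = 32, RTE_ALIGN(100, 32) = 128, plus 2 * 32 = 64, giving 192 rte_event slots allocated.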
+
+static int
+rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+          int rx_queue_id,
+          const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
        struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
        struct rte_event_eth_rx_adapter_queue_conf temp_conf;
@@ -2034,6 +2134,16 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
                        temp_conf.servicing_weight = 1;
                }
                queue_conf = &temp_conf;
+
+               if (queue_conf->servicing_weight == 0 &&
+                   rx_adapter->use_queue_event_buf) {
+
+                       RTE_EDEV_LOG_ERR("Use of queue level event buffer "
+                                        "not supported for interrupt queues "
+                                        "dev_id: %d queue_id: %d",
+                                        eth_dev_id, rx_queue_id);
+                       return -EINVAL;
+               }
        }
 
        nb_rx_queues = dev_info->dev->data->nb_rx_queues;
@@ -2113,7 +2223,9 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
 
 
 
-       rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
+       ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
+       if (ret)
+               goto err_free_rxqueue;
        rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
 
        rte_free(rx_adapter->eth_rx_poll);
@@ -2134,13 +2246,13 @@ err_free_rxqueue:
        rte_free(rx_poll);
        rte_free(rx_wrr);
 
-       return 0;
+       return ret;
 }
 
 static int
 rxa_ctrl(uint8_t id, int start)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter;
+       struct event_eth_rx_adapter *rx_adapter;
        struct rte_eventdev *dev;
        struct eth_device_info *dev_info;
        uint32_t i;
@@ -2182,12 +2294,15 @@ rxa_ctrl(uint8_t id, int start)
        return 0;
 }
 
-int
-rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
-                               rte_event_eth_rx_adapter_conf_cb conf_cb,
-                               void *conf_arg)
-{
-       struct rte_event_eth_rx_adapter *rx_adapter;
+static int
+rxa_create(uint8_t id, uint8_t dev_id,
+          struct rte_event_eth_rx_adapter_params *rxa_params,
+          rte_event_eth_rx_adapter_conf_cb conf_cb,
+          void *conf_arg)
+{
+       struct event_eth_rx_adapter *rx_adapter;
+       struct eth_event_enqueue_buffer *buf;
+       struct rte_event *events;
        int ret;
        int socket_id;
        uint16_t i;
@@ -2202,6 +2317,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
 
        RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
        if (conf_cb == NULL)
                return -EINVAL;
 
@@ -2249,18 +2365,111 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
                rte_free(rx_adapter);
                return -ENOMEM;
        }
+
        rte_spinlock_init(&rx_adapter->rx_lock);
+
        for (i = 0; i < RTE_MAX_ETHPORTS; i++)
                rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
 
+       /* Rx adapter event buffer allocation */
+       rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf;
+
+       if (!rx_adapter->use_queue_event_buf) {
+               buf = &rx_adapter->event_enqueue_buffer;
+               buf->events_size = rxa_params->event_buf_size;
+
+               events = rte_zmalloc_socket(rx_adapter->mem_name,
+                                           buf->events_size * sizeof(*events),
+                                           0, socket_id);
+               if (events == NULL) {
+                       RTE_EDEV_LOG_ERR("Failed to allocate memory "
+                                        "for adapter event buffer");
+                       rte_free(rx_adapter->eth_devices);
+                       rte_free(rx_adapter);
+                       return -ENOMEM;
+               }
+
+               rx_adapter->event_enqueue_buffer.events = events;
+       }
+
        event_eth_rx_adapter[id] = rx_adapter;
+
        if (conf_cb == rxa_default_conf_cb)
                rx_adapter->default_cb_arg = 1;
+
+       if (rte_mbuf_dyn_rx_timestamp_register(
+                       &event_eth_rx_timestamp_dynfield_offset,
+                       &event_eth_rx_timestamp_dynflag) != 0) {
+               RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf\n");
+               return -rte_errno;
+       }
+
        rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb,
                conf_arg);
        return 0;
 }
 
+int
+rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
+                               rte_event_eth_rx_adapter_conf_cb conf_cb,
+                               void *conf_arg)
+{
+       struct rte_event_eth_rx_adapter_params rxa_params = {0};
+
+       /* use default values for adapter params */
+       rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE;
+       rxa_params.use_queue_event_buf = false;
+
+       return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg);
+}
+
+int
+rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id,
+                       struct rte_event_port_conf *port_config,
+                       struct rte_event_eth_rx_adapter_params *rxa_params)
+{
+       struct rte_event_port_conf *pc;
+       int ret;
+       struct rte_event_eth_rx_adapter_params temp_params = {0};
+
+       if (port_config == NULL)
+               return -EINVAL;
+
+       if (rxa_params == NULL) {
+               /* use default values if rxa_params is NULL */
+               rxa_params = &temp_params;
+               rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE;
+               rxa_params->use_queue_event_buf = false;
+       } else if ((!rxa_params->use_queue_event_buf &&
+                   rxa_params->event_buf_size == 0) ||
+                  (rxa_params->use_queue_event_buf &&
+                   rxa_params->event_buf_size != 0)) {
+               RTE_EDEV_LOG_ERR("Invalid adapter params\n");
+               return -EINVAL;
+       } else if (!rxa_params->use_queue_event_buf) {
+               /* pad the event buffer size to a multiple of BATCH_SIZE, the
+                * unit used when fetching packets from the NIC Rx queues, for
+                * full buffer utilization and to prevent unnecessary rollovers
+                */
+
+               rxa_params->event_buf_size =
+                       RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE);
+               rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE);
+       }
+
+       pc = rte_malloc(NULL, sizeof(*pc), 0);
+       if (pc == NULL)
+               return -ENOMEM;
+
+       *pc = *port_config;
+
+       ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc);
+       if (ret)
+               rte_free(pc);
+
+       return ret;
+}
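
A hedged usage sketch of the new entry point (the ex_-prefixed names are illustrative; error handling elided). With use_queue_event_buf set, event_buf_size must be left 0 here and the sizes supplied per queue through rte_event_eth_rx_adapter_queue_add() instead, mirroring the validation above:

	struct rte_event_port_conf ex_port_conf = {
		.new_event_threshold = 1024,
		.dequeue_depth = 32,
		.enqueue_depth = 32,
	};
	struct rte_event_eth_rx_adapter_params ex_params = {
		.event_buf_size = 0,          /* per-queue buffers carry the size */
		.use_queue_event_buf = true,
	};

	int ret = rte_event_eth_rx_adapter_create_with_params(ex_id, ex_dev_id,
							      &ex_port_conf,
							      &ex_params);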
+
 int
 rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
                struct rte_event_port_conf *port_config)
@@ -2270,12 +2479,14 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
 
        if (port_config == NULL)
                return -EINVAL;
+
        RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
        pc = rte_malloc(NULL, sizeof(*pc), 0);
        if (pc == NULL)
                return -ENOMEM;
        *pc = *port_config;
+
        ret = rte_event_eth_rx_adapter_create_ext(id, dev_id,
                                        rxa_default_conf_cb,
                                        pc);
@@ -2287,7 +2498,7 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
 int
 rte_event_eth_rx_adapter_free(uint8_t id)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter;
+       struct event_eth_rx_adapter *rx_adapter;
 
        RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -2304,6 +2515,8 @@ rte_event_eth_rx_adapter_free(uint8_t id)
        if (rx_adapter->default_cb_arg)
                rte_free(rx_adapter->conf_arg);
        rte_free(rx_adapter->eth_devices);
+       if (!rx_adapter->use_queue_event_buf)
+               rte_free(rx_adapter->event_enqueue_buffer.events);
        rte_free(rx_adapter);
        event_eth_rx_adapter[id] = NULL;
 
@@ -2319,9 +2532,10 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
 {
        int ret;
        uint32_t cap;
-       struct rte_event_eth_rx_adapter *rx_adapter;
+       struct event_eth_rx_adapter *rx_adapter;
        struct rte_eventdev *dev;
        struct eth_device_info *dev_info;
+       struct rte_event_eth_rx_adapter_vector_limits limits;
 
        RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
        RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
@@ -2349,13 +2563,46 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
                return -EINVAL;
        }
 
-       if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0 &&
-           (queue_conf->rx_queue_flags &
-            RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR)) {
-               RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
-                                " eth port: %" PRIu16 " adapter id: %" PRIu8,
-                                eth_dev_id, id);
-               return -EINVAL;
+       if (queue_conf->rx_queue_flags &
+           RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
+
+               if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0) {
+                       RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
+                                        " eth port: %" PRIu16
+                                        " adapter id: %" PRIu8,
+                                        eth_dev_id, id);
+                       return -EINVAL;
+               }
+
+               ret = rte_event_eth_rx_adapter_vector_limits_get(
+                       rx_adapter->eventdev_id, eth_dev_id, &limits);
+               if (ret < 0) {
+                       RTE_EDEV_LOG_ERR("Failed to get event device vector limits,"
+                                        " eth port: %" PRIu16
+                                        " adapter id: %" PRIu8,
+                                        eth_dev_id, id);
+                       return -EINVAL;
+               }
+               if (queue_conf->vector_sz < limits.min_sz ||
+                   queue_conf->vector_sz > limits.max_sz ||
+                   queue_conf->vector_timeout_ns < limits.min_timeout_ns ||
+                   queue_conf->vector_timeout_ns > limits.max_timeout_ns ||
+                   queue_conf->vector_mp == NULL) {
+                       RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
+                                        " eth port: %" PRIu16
+                                        " adapter id: %" PRIu8,
+                                        eth_dev_id, id);
+                       return -EINVAL;
+               }
+               if (queue_conf->vector_mp->elt_size <
+                   (sizeof(struct rte_event_vector) +
+                    (sizeof(uintptr_t) * queue_conf->vector_sz))) {
+                       RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
+                                        " eth port: %" PRIu16
+                                        " adapter id: %" PRIu8,
+                                        eth_dev_id, id);
+                       return -EINVAL;
+               }
        }
 
        if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
@@ -2373,6 +2620,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
                return -EINVAL;
        }
 
+       if ((rx_adapter->use_queue_event_buf &&
+            queue_conf->event_buf_size == 0) ||
+           (!rx_adapter->use_queue_event_buf &&
+            queue_conf->event_buf_size != 0)) {
+               RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue");
+               return -EINVAL;
+       }
+
        dev_info = &rx_adapter->eth_devices[eth_dev_id];
 
        if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
@@ -2437,7 +2692,7 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
 {
        int ret = 0;
        struct rte_eventdev *dev;
-       struct rte_event_eth_rx_adapter *rx_adapter;
+       struct event_eth_rx_adapter *rx_adapter;
        struct eth_device_info *dev_info;
        uint32_t cap;
        uint32_t nb_rx_poll = 0;
@@ -2528,6 +2783,11 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
                rx_adapter->eth_rx_poll = rx_poll;
                rx_adapter->wrr_sched = rx_wrr;
                rx_adapter->wrr_len = nb_wrr;
+               /*
+                * reset the next poll start position (wrr_pos) to avoid a
+                * buffer overrun when a queue delete reduces wrr_len
+                */
+               rx_adapter->wrr_pos = 0;
                rx_adapter->num_intr_vec += num_intr_vec;
 
                if (dev_info->nb_dev_queues == 0) {
@@ -2551,83 +2811,6 @@ unlock_ret:
        return ret;
 }
 
-int
-rte_event_eth_rx_adapter_queue_event_vector_config(
-       uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id,
-       struct rte_event_eth_rx_adapter_event_vector_config *config)
-{
-       struct rte_event_eth_rx_adapter_vector_limits limits;
-       struct rte_event_eth_rx_adapter *rx_adapter;
-       struct rte_eventdev *dev;
-       uint32_t cap;
-       int ret;
-
-       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
-
-       rx_adapter = rxa_id_to_adapter(id);
-       if ((rx_adapter == NULL) || (config == NULL))
-               return -EINVAL;
-
-       dev = &rte_eventdevs[rx_adapter->eventdev_id];
-       ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
-                                               eth_dev_id, &cap);
-       if (ret) {
-               RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
-                                "eth port %" PRIu16,
-                                id, eth_dev_id);
-               return ret;
-       }
-
-       if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR)) {
-               RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
-                                " eth port: %" PRIu16 " adapter id: %" PRIu8,
-                                eth_dev_id, id);
-               return -EINVAL;
-       }
-
-       ret = rte_event_eth_rx_adapter_vector_limits_get(
-               rx_adapter->eventdev_id, eth_dev_id, &limits);
-       if (ret) {
-               RTE_EDEV_LOG_ERR("Failed to get vector limits edev %" PRIu8
-                                "eth port %" PRIu16,
-                                rx_adapter->eventdev_id, eth_dev_id);
-               return ret;
-       }
-
-       if (config->vector_sz < limits.min_sz ||
-           config->vector_sz > limits.max_sz ||
-           config->vector_timeout_ns < limits.min_timeout_ns ||
-           config->vector_timeout_ns > limits.max_timeout_ns ||
-           config->vector_mp == NULL) {
-               RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
-                                " eth port: %" PRIu16 " adapter id: %" PRIu8,
-                                eth_dev_id, id);
-               return -EINVAL;
-       }
-       if (config->vector_mp->elt_size <
-           (sizeof(struct rte_event_vector) +
-            (sizeof(uintptr_t) * config->vector_sz))) {
-               RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
-                                " eth port: %" PRIu16 " adapter id: %" PRIu8,
-                                eth_dev_id, id);
-               return -EINVAL;
-       }
-
-       if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
-               RTE_FUNC_PTR_OR_ERR_RET(
-                       *dev->dev_ops->eth_rx_adapter_event_vector_config,
-                       -ENOTSUP);
-               ret = dev->dev_ops->eth_rx_adapter_event_vector_config(
-                       dev, &rte_eth_devices[eth_dev_id], rx_queue_id, config);
-       } else {
-               rxa_sw_event_vector_configure(rx_adapter, eth_dev_id,
-                                             rx_queue_id, config);
-       }
-
-       return ret;
-}
-
 int
 rte_event_eth_rx_adapter_vector_limits_get(
        uint8_t dev_id, uint16_t eth_port_id,
@@ -2680,18 +2863,33 @@ rte_event_eth_rx_adapter_stop(uint8_t id)
        return rxa_ctrl(id, 0);
 }
 
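+/* Reset the SW stats of one adapter Rx queue */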
+static inline void
+rxa_queue_stats_reset(struct eth_rx_queue_info *queue_info)
+{
+       struct rte_event_eth_rx_adapter_stats *q_stats;
+
+       q_stats = queue_info->stats;
+       memset(q_stats, 0, sizeof(*q_stats));
+}
+
 int
 rte_event_eth_rx_adapter_stats_get(uint8_t id,
                               struct rte_event_eth_rx_adapter_stats *stats)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter;
+       struct event_eth_rx_adapter *rx_adapter;
+       struct eth_event_enqueue_buffer *buf;
        struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
        struct rte_event_eth_rx_adapter_stats dev_stats;
        struct rte_eventdev *dev;
        struct eth_device_info *dev_info;
-       uint32_t i;
+       struct eth_rx_queue_info *queue_info;
+       struct rte_event_eth_rx_adapter_stats *q_stats;
+       uint32_t i, j;
        int ret;
 
+       if (rxa_memzone_lookup())
+               return -ENOMEM;
+
        RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
        rx_adapter = rxa_id_to_adapter(id);
@@ -2700,8 +2898,32 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 
        dev = &rte_eventdevs[rx_adapter->eventdev_id];
        memset(stats, 0, sizeof(*stats));
+
+       if (rx_adapter->service_inited)
+               *stats = rx_adapter->stats;
+
        RTE_ETH_FOREACH_DEV(i) {
                dev_info = &rx_adapter->eth_devices[i];
+
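+               /*
+                * In per Rx queue event buffer mode, sum the SW stats of
+                * each added queue into the adapter totals reported here.
+                */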
+               if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) {
+
+                       for (j = 0; j < dev_info->dev->data->nb_rx_queues;
+                            j++) {
+                               queue_info = &dev_info->rx_queue[j];
+                               if (!queue_info->queue_enabled)
+                                       continue;
+                               q_stats = queue_info->stats;
+
+                               stats->rx_packets += q_stats->rx_packets;
+                               stats->rx_poll_count += q_stats->rx_poll_count;
+                               stats->rx_enq_count += q_stats->rx_enq_count;
+                               stats->rx_enq_retry += q_stats->rx_enq_retry;
+                               stats->rx_dropped += q_stats->rx_dropped;
+                               stats->rx_enq_block_cycles +=
+                                               q_stats->rx_enq_block_cycles;
+                       }
+               }
+
                if (dev_info->internal_event_port == 0 ||
                        dev->dev_ops->eth_rx_adapter_stats_get == NULL)
                        continue;
@@ -2714,21 +2936,87 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
                dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
        }
 
-       if (rx_adapter->service_inited)
-               *stats = rx_adapter->stats;
-
+       buf = &rx_adapter->event_enqueue_buffer;
        stats->rx_packets += dev_stats_sum.rx_packets;
        stats->rx_enq_count += dev_stats_sum.rx_enq_count;
+       stats->rx_event_buf_count = buf->count;
+       stats->rx_event_buf_size = buf->events_size;
+
+       return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
+               uint16_t eth_dev_id,
+               uint16_t rx_queue_id,
+               struct rte_event_eth_rx_adapter_queue_stats *stats)
+{
+       struct event_eth_rx_adapter *rx_adapter;
+       struct eth_device_info *dev_info;
+       struct eth_rx_queue_info *queue_info;
+       struct eth_event_enqueue_buffer *event_buf;
+       struct rte_event_eth_rx_adapter_stats *q_stats;
+       struct rte_eventdev *dev;
+
+       if (rxa_memzone_lookup())
+               return -ENOMEM;
+
+       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+       rx_adapter = rxa_id_to_adapter(id);
+
+       if (rx_adapter == NULL || stats == NULL)
+               return -EINVAL;
+
+       if (!rx_adapter->use_queue_event_buf)
+               return -EINVAL;
+
+       if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+               RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
+               return -EINVAL;
+       }
+
+       dev_info = &rx_adapter->eth_devices[eth_dev_id];
+       if (dev_info->rx_queue == NULL ||
+           !dev_info->rx_queue[rx_queue_id].queue_enabled) {
+               RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
+               return -EINVAL;
+       }
+
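+       /*
+        * For the SW (service core) path, report the queue event buffer
+        * fill level and the per-queue SW counters directly.
+        */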
+       if (dev_info->internal_event_port == 0) {
+               queue_info = &dev_info->rx_queue[rx_queue_id];
+               event_buf = queue_info->event_buf;
+               q_stats = queue_info->stats;
+
+               stats->rx_event_buf_count = event_buf->count;
+               stats->rx_event_buf_size = event_buf->events_size;
+               stats->rx_packets = q_stats->rx_packets;
+               stats->rx_poll_count = q_stats->rx_poll_count;
+               stats->rx_dropped = q_stats->rx_dropped;
+       }
+
+       dev = &rte_eventdevs[rx_adapter->eventdev_id];
+       if (dev->dev_ops->eth_rx_adapter_queue_stats_get != NULL) {
+               return (*dev->dev_ops->eth_rx_adapter_queue_stats_get)(dev,
+                                               &rte_eth_devices[eth_dev_id],
+                                               rx_queue_id, stats);
+       }
+
        return 0;
 }
 
 int
 rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter;
+       struct event_eth_rx_adapter *rx_adapter;
        struct rte_eventdev *dev;
        struct eth_device_info *dev_info;
-       uint32_t i;
+       struct eth_rx_queue_info *queue_info;
+       uint32_t i, j;
+
+       if (rxa_memzone_lookup())
+               return -ENOMEM;
 
        RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -2737,8 +3025,21 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
                return -EINVAL;
 
        dev = &rte_eventdevs[rx_adapter->eventdev_id];
+
        RTE_ETH_FOREACH_DEV(i) {
                dev_info = &rx_adapter->eth_devices[i];
+
+               if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) {
+
+                       for (j = 0; j < dev_info->dev->data->nb_rx_queues;
+                            j++) {
+                               queue_info = &dev_info->rx_queue[j];
+                               if (!queue_info->queue_enabled)
+                                       continue;
+                               rxa_queue_stats_reset(queue_info);
+                       }
+               }
+
                if (dev_info->internal_event_port == 0 ||
                        dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
                        continue;
@@ -2747,13 +3048,68 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
        }
 
        memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
+
+       return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
+               uint16_t eth_dev_id,
+               uint16_t rx_queue_id)
+{
+       struct event_eth_rx_adapter *rx_adapter;
+       struct eth_device_info *dev_info;
+       struct eth_rx_queue_info *queue_info;
+       struct rte_eventdev *dev;
+
+       if (rxa_memzone_lookup())
+               return -ENOMEM;
+
+       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+       rx_adapter = rxa_id_to_adapter(id);
+       if (rx_adapter == NULL)
+               return -EINVAL;
+
+       if (!rx_adapter->use_queue_event_buf)
+               return -EINVAL;
+
+       if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+               RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
+               return -EINVAL;
+       }
+
+       dev_info = &rx_adapter->eth_devices[eth_dev_id];
+
+       if (dev_info->rx_queue == NULL ||
+           !dev_info->rx_queue[rx_queue_id].queue_enabled) {
+               RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
+               return -EINVAL;
+       }
+
+       if (dev_info->internal_event_port == 0) {
+               queue_info = &dev_info->rx_queue[rx_queue_id];
+               rxa_queue_stats_reset(queue_info);
+       }
+
+       dev = &rte_eventdevs[rx_adapter->eventdev_id];
+       if (dev->dev_ops->eth_rx_adapter_queue_stats_reset != NULL) {
+               return (*dev->dev_ops->eth_rx_adapter_queue_stats_reset)(dev,
+                                               &rte_eth_devices[eth_dev_id],
+                                               rx_queue_id);
+       }
+
        return 0;
 }
 
 int
 rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter;
+       struct event_eth_rx_adapter *rx_adapter;
+
+       if (rxa_memzone_lookup())
+               return -ENOMEM;
 
        RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -2767,13 +3123,33 @@ rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
        return rx_adapter->service_inited ? 0 : -ESRCH;
 }
 
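+/*
+ * Return the event port used by a service-mode Rx adapter to enqueue
+ * events; only valid once the adapter service has been initialized.
+ */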
+int
+rte_event_eth_rx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
+{
+       struct event_eth_rx_adapter *rx_adapter;
+
+       if (rxa_memzone_lookup())
+               return -ENOMEM;
+
+       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+       rx_adapter = rxa_id_to_adapter(id);
+       if (rx_adapter == NULL || event_port_id == NULL)
+               return -EINVAL;
+
+       if (rx_adapter->service_inited)
+               *event_port_id = rx_adapter->event_port_id;
+
+       return rx_adapter->service_inited ? 0 : -ESRCH;
+}
+
 int
 rte_event_eth_rx_adapter_cb_register(uint8_t id,
                                        uint16_t eth_dev_id,
                                        rte_event_eth_rx_adapter_cb_fn cb_fn,
                                        void *cb_arg)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter;
+       struct event_eth_rx_adapter *rx_adapter;
        struct eth_device_info *dev_info;
        uint32_t cap;
        int ret;
@@ -2811,3 +3187,334 @@ rte_event_eth_rx_adapter_cb_register(uint8_t id,
 
        return 0;
 }
+
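+/*
+ * Reconstruct the queue_conf that was used when the Rx queue was added,
+ * from the per-queue state kept by the SW adapter; a PMD may override it
+ * via its eth_rx_adapter_queue_conf_get op when one is implemented.
+ */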
+int
+rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
+                       uint16_t eth_dev_id,
+                       uint16_t rx_queue_id,
+                       struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+       struct rte_eventdev *dev;
+       struct event_eth_rx_adapter *rx_adapter;
+       struct eth_device_info *dev_info;
+       struct eth_rx_queue_info *queue_info;
+       struct rte_event *qi_ev;
+       int ret;
+
+       if (rxa_memzone_lookup())
+               return -ENOMEM;
+
+       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+       if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+               RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+               return -EINVAL;
+       }
+
+       if (queue_conf == NULL) {
+               RTE_EDEV_LOG_ERR("Rx queue conf struct cannot be NULL");
+               return -EINVAL;
+       }
+
+       rx_adapter = rxa_id_to_adapter(id);
+       if (rx_adapter == NULL)
+               return -EINVAL;
+
+       dev_info = &rx_adapter->eth_devices[eth_dev_id];
+       if (dev_info->rx_queue == NULL ||
+           !dev_info->rx_queue[rx_queue_id].queue_enabled) {
+               RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
+               return -EINVAL;
+       }
+
+       queue_info = &dev_info->rx_queue[rx_queue_id];
+       qi_ev = (struct rte_event *)&queue_info->event;
+
+       memset(queue_conf, 0, sizeof(*queue_conf));
+       queue_conf->rx_queue_flags = 0;
+       if (queue_info->flow_id_mask != 0)
+               queue_conf->rx_queue_flags |=
+                       RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
+       queue_conf->servicing_weight = queue_info->wt;
+
+       memcpy(&queue_conf->ev, qi_ev, sizeof(*qi_ev));
+
+       dev = &rte_eventdevs[rx_adapter->eventdev_id];
+       if (dev->dev_ops->eth_rx_adapter_queue_conf_get != NULL) {
+               ret = (*dev->dev_ops->eth_rx_adapter_queue_conf_get)(dev,
+                                               &rte_eth_devices[eth_dev_id],
+                                               rx_queue_id,
+                                               queue_conf);
+               return ret;
+       }
+
+       return 0;
+}
+
+#define RXA_ADD_DICT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
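+/*
+ * e.g. RXA_ADD_DICT(stats, rx_packets) expands to
+ * rte_tel_data_add_dict_u64(d, "rx_packets", stats.rx_packets)
+ */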
+
+static int
+handle_rxa_stats(const char *cmd __rte_unused,
+                const char *params,
+                struct rte_tel_data *d)
+{
+       uint8_t rx_adapter_id;
+       struct rte_event_eth_rx_adapter_stats rx_adptr_stats;
+
+       if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+               return -1;
+
+       /* Get Rx adapter ID from parameter string */
+       rx_adapter_id = atoi(params);
+       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
+
+       /* Get Rx adapter stats */
+       if (rte_event_eth_rx_adapter_stats_get(rx_adapter_id,
+                                              &rx_adptr_stats)) {
+               RTE_EDEV_LOG_ERR("Failed to get Rx adapter stats\n");
+               return -1;
+       }
+
+       rte_tel_data_start_dict(d);
+       rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
+       RXA_ADD_DICT(rx_adptr_stats, rx_packets);
+       RXA_ADD_DICT(rx_adptr_stats, rx_poll_count);
+       RXA_ADD_DICT(rx_adptr_stats, rx_dropped);
+       RXA_ADD_DICT(rx_adptr_stats, rx_enq_retry);
+       RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_count);
+       RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_size);
+       RXA_ADD_DICT(rx_adptr_stats, rx_enq_count);
+       RXA_ADD_DICT(rx_adptr_stats, rx_enq_start_ts);
+       RXA_ADD_DICT(rx_adptr_stats, rx_enq_block_cycles);
+       RXA_ADD_DICT(rx_adptr_stats, rx_enq_end_ts);
+       RXA_ADD_DICT(rx_adptr_stats, rx_intr_packets);
+
+       return 0;
+}
+
+static int
+handle_rxa_stats_reset(const char *cmd __rte_unused,
+                      const char *params,
+                      struct rte_tel_data *d __rte_unused)
+{
+       uint8_t rx_adapter_id;
+
+       if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+               return -1;
+
+       /* Get Rx adapter ID from parameter string */
+       rx_adapter_id = atoi(params);
+       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
+
+       /* Reset Rx adapter stats */
+       if (rte_event_eth_rx_adapter_stats_reset(rx_adapter_id)) {
+               RTE_EDEV_LOG_ERR("Failed to reset Rx adapter stats\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+static int
+handle_rxa_get_queue_conf(const char *cmd __rte_unused,
+                         const char *params,
+                         struct rte_tel_data *d)
+{
+       uint8_t rx_adapter_id;
+       uint16_t rx_queue_id;
+       int eth_dev_id;
+       char *token, *l_params;
+       struct rte_event_eth_rx_adapter_queue_conf queue_conf;
+
+       if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+               return -1;
+
+       /* Get Rx adapter ID from parameter string */
+       l_params = strdup(params);
+       if (l_params == NULL)
+               return -1;
+       token = strtok(l_params, ",");
+       rx_adapter_id = strtoul(token, NULL, 10);
+
+       token = strtok(NULL, ",");
+       if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+               goto error;
+
+       /* Get ethernet device ID from parameter string */
+       eth_dev_id = strtoul(token, NULL, 10);
+
+       token = strtok(NULL, ",");
+       if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+               goto error;
+
+       /* Get Rx queue ID from parameter string */
+       rx_queue_id = strtoul(token, NULL, 10);
+
+       token = strtok(NULL, "\0");
+       if (token != NULL)
+               RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+                                " telemetry command, ignoring");
+       free(l_params);
+
+       /* Validate the parsed IDs only after the duplicated string is freed */
+       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+       if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+               RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+               return -EINVAL;
+       }
+
+       if (rte_event_eth_rx_adapter_queue_conf_get(rx_adapter_id, eth_dev_id,
+                                                   rx_queue_id, &queue_conf)) {
+               RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue config");
+               return -1;
+       }
+
+       rte_tel_data_start_dict(d);
+       rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
+       rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id);
+       rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id);
+       RXA_ADD_DICT(queue_conf, rx_queue_flags);
+       RXA_ADD_DICT(queue_conf, servicing_weight);
+       RXA_ADD_DICT(queue_conf.ev, queue_id);
+       RXA_ADD_DICT(queue_conf.ev, sched_type);
+       RXA_ADD_DICT(queue_conf.ev, priority);
+       RXA_ADD_DICT(queue_conf.ev, flow_id);
+
+       return 0;
+
+error:
+       free(l_params);
+       return -1;
+}
+
+static int
+handle_rxa_get_queue_stats(const char *cmd __rte_unused,
+                          const char *params,
+                          struct rte_tel_data *d)
+{
+       uint8_t rx_adapter_id;
+       uint16_t rx_queue_id;
+       int eth_dev_id;
+       char *token, *l_params;
+       struct rte_event_eth_rx_adapter_queue_stats q_stats;
+
+       if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+               return -1;
+
+       /* Get Rx adapter ID from parameter string */
+       l_params = strdup(params);
+       if (l_params == NULL)
+               return -1;
+       token = strtok(l_params, ",");
+       rx_adapter_id = strtoul(token, NULL, 10);
+
+       token = strtok(NULL, ",");
+       if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+               goto error;
+
+       /* Get ethernet device ID from parameter string */
+       eth_dev_id = strtoul(token, NULL, 10);
+
+       token = strtok(NULL, ",");
+       if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+               goto error;
+
+       /* Get Rx queue ID from parameter string */
+       rx_queue_id = strtoul(token, NULL, 10);
+
+       token = strtok(NULL, "\0");
+       if (token != NULL)
+               RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+                                " telemetry command, ignoring");
+       free(l_params);
+
+       /* Validate the parsed IDs only after the duplicated string is freed */
+       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+       if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+               RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+               return -EINVAL;
+       }
+
+       if (rte_event_eth_rx_adapter_queue_stats_get(rx_adapter_id, eth_dev_id,
+                                                   rx_queue_id, &q_stats)) {
+               RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue stats");
+               return -1;
+       }
+
+       rte_tel_data_start_dict(d);
+       rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
+       rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id);
+       rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id);
+       RXA_ADD_DICT(q_stats, rx_event_buf_count);
+       RXA_ADD_DICT(q_stats, rx_event_buf_size);
+       RXA_ADD_DICT(q_stats, rx_poll_count);
+       RXA_ADD_DICT(q_stats, rx_packets);
+       RXA_ADD_DICT(q_stats, rx_dropped);
+
+       return 0;
+
+error:
+       free(l_params);
+       return -1;
+}
+
+static int
+handle_rxa_queue_stats_reset(const char *cmd __rte_unused,
+                            const char *params,
+                            struct rte_tel_data *d __rte_unused)
+{
+       uint8_t rx_adapter_id;
+       uint16_t rx_queue_id;
+       int eth_dev_id;
+       char *token, *l_params;
+
+       if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+               return -1;
+
+       /* Get Rx adapter ID from parameter string */
+       l_params = strdup(params);
+       if (l_params == NULL)
+               return -1;
+       token = strtok(l_params, ",");
+       rx_adapter_id = strtoul(token, NULL, 10);
+
+       token = strtok(NULL, ",");
+       if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+               goto error;
+
+       /* Get ethernet device ID from parameter string */
+       eth_dev_id = strtoul(token, NULL, 10);
+
+       token = strtok(NULL, ",");
+       if (token == NULL || strlen(token) == 0 || !isdigit(*token))
+               goto error;
+
+       /* Get Rx queue ID from parameter string */
+       rx_queue_id = strtoul(token, NULL, 10);
+
+       token = strtok(NULL, "\0");
+       if (token != NULL)
+               RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+                                " telemetry command, ignoring");
+       free(l_params);
+
+       /* Validate the parsed IDs only after the duplicated string is freed */
+       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+       if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+               RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+               return -EINVAL;
+       }
+
+       if (rte_event_eth_rx_adapter_queue_stats_reset(rx_adapter_id,
+                                                      eth_dev_id,
+                                                      rx_queue_id)) {
+               RTE_EDEV_LOG_ERR("Failed to reset Rx adapter queue stats");
+               return -1;
+       }
+
+       return 0;
+
+error:
+       free(l_params);
+       return -1;
+}
+
+RTE_INIT(rxa_init_telemetry)
+{
+       rte_telemetry_register_cmd("/eventdev/rxa_stats",
+               handle_rxa_stats,
+               "Returns Rx adapter stats. Parameter: rxa_id");
+
+       rte_telemetry_register_cmd("/eventdev/rxa_stats_reset",
+               handle_rxa_stats_reset,
+               "Reset Rx adapter stats. Parameter: rxa_id");
+
+       rte_telemetry_register_cmd("/eventdev/rxa_queue_conf",
+               handle_rxa_get_queue_conf,
+               "Returns Rx queue config. Parameter: rxa_id, dev_id, queue_id");
+
+       rte_telemetry_register_cmd("/eventdev/rxa_queue_stats",
+               handle_rxa_get_queue_stats,
+               "Returns Rx queue stats. Parameter: rxa_id, dev_id, queue_id");
+
+       rte_telemetry_register_cmd("/eventdev/rxa_queue_stats_reset",
+               handle_rxa_queue_stats_reset,
+               "Reset Rx queue stats. Parameter: rxa_id, dev_id, queue_id");
+}
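+
+/*
+ * Example usage (illustrative, assuming the application has telemetry
+ * enabled and usertools/dpdk-telemetry.py is used as the client):
+ *   --> /eventdev/rxa_stats,0
+ *   --> /eventdev/rxa_queue_stats,0,0,0
+ * Adapter commands take rxa_id; per-queue commands take
+ * rxa_id,dev_id,queue_id, matching the help strings registered above.
+ */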