lib/eventdev/rte_event_eth_rx_adapter.c
index e6de036..ff83ce8 100644
 #include <rte_thash.h>
 #include <rte_interrupts.h>
 #include <rte_mbuf_dyn.h>
+#include <rte_telemetry.h>
 
 #include "rte_eventdev.h"
 #include "eventdev_pmd.h"
-#include "rte_eventdev_trace.h"
+#include "eventdev_trace.h"
 #include "rte_event_eth_rx_adapter.h"
 
 #define BATCH_SIZE             32
@@ -81,7 +82,7 @@ struct eth_rx_vector_data {
 TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
 
 /* Instance per adapter */
-struct rte_eth_event_enqueue_buffer {
+struct eth_event_enqueue_buffer {
        /* Count of events in this buffer */
        uint16_t count;
        /* Array of events in this buffer */
@@ -97,15 +98,17 @@ struct rte_eth_event_enqueue_buffer {
        uint16_t last_mask;
 };
 
-struct rte_event_eth_rx_adapter {
+struct event_eth_rx_adapter {
        /* RSS key */
        uint8_t rss_key_be[RSS_KEY_SIZE];
        /* Event device identifier */
        uint8_t eventdev_id;
-       /* Per ethernet device structure */
-       struct eth_device_info *eth_devices;
        /* Event port identifier */
        uint8_t event_port_id;
+       /* Flag indicating per rxq event buffer */
+       bool use_queue_event_buf;
+       /* Per ethernet device structure */
+       struct eth_device_info *eth_devices;
        /* Lock to serialize config updates with service function */
        rte_spinlock_t rx_lock;
        /* Max mbufs processed in any service function invocation */
@@ -121,7 +124,7 @@ struct rte_event_eth_rx_adapter {
        /* Next entry in wrr[] to begin polling */
        uint32_t wrr_pos;
        /* Event burst buffer */
-       struct rte_eth_event_enqueue_buffer event_enqueue_buffer;
+       struct eth_event_enqueue_buffer event_enqueue_buffer;
        /* Vector enable flag */
        uint8_t ena_vector;
        /* Timestamp of previous vector expiry list traversal */
@@ -241,9 +244,14 @@ struct eth_rx_queue_info {
        uint32_t flow_id_mask;  /* Set to ~0 if app provides flow id else 0 */
        uint64_t event;
        struct eth_rx_vector_data vector_data;
+       struct eth_event_enqueue_buffer *event_buf;
+       /* Use the adapter stats struct for queue level stats,
+        * as the same counters are maintained for both the adapter
+        * and the queue.
+        */
+       struct rte_event_eth_rx_adapter_stats *stats;
 };
 
-static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
+static struct event_eth_rx_adapter **event_eth_rx_adapter;
 
 /* Enable dynamic timestamp field in mbuf */
 static uint64_t event_eth_rx_timestamp_dynflag;
@@ -262,6 +270,22 @@ rxa_validate_id(uint8_t id)
        return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
 }
 
+static inline struct eth_event_enqueue_buffer *
+rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+                 uint16_t rx_queue_id,
+                 struct rte_event_eth_rx_adapter_stats **stats)
+{
+       if (rx_adapter->use_queue_event_buf) {
+               struct eth_device_info *dev_info =
+                       &rx_adapter->eth_devices[eth_dev_id];
+               *stats = dev_info->rx_queue[rx_queue_id].stats;
+               return dev_info->rx_queue[rx_queue_id].event_buf;
+       } else {
+               *stats = &rx_adapter->stats;
+               return &rx_adapter->event_enqueue_buffer;
+       }
+}
+
 #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
        if (!rxa_validate_id(id)) { \
                RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
@@ -269,8 +293,32 @@ rxa_validate_id(uint8_t id)
        } \
 } while (0)
 
+#define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(id, retval) do { \
+       if (!rxa_validate_id(id)) { \
+               RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
+               ret = retval; \
+               goto error; \
+       } \
+} while (0)
+
+#define RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, retval) do { \
+       if ((token) == NULL || strlen(token) == 0 || !isdigit(*token)) { \
+               RTE_EDEV_LOG_ERR("Invalid eth Rx adapter token\n"); \
+               ret = retval; \
+               goto error; \
+       } \
+} while (0)
+
+#define RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(port_id, retval) do { \
+       if (!rte_eth_dev_is_valid_port(port_id)) { \
+               RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
+               ret = retval; \
+               goto error; \
+       } \
+} while (0)
+
 static inline int
-rxa_sw_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
 {
        return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
 }
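
The *_OR_GOTO_ERR_RET macros added in this hunk mirror the existing *_OR_ERR_RET check, but jump to a local error label instead of returning, so a caller that owns resources can release them on a validation failure. None of their call sites appear in this excerpt; the sketch below is a hypothetical handler (its name, parsing and cleanup are assumptions, not code from this patch) showing the intended pattern. It additionally needs <stdlib.h>, <string.h> and <ctype.h>.

/* Hypothetical sketch only: goto-style validation with a single cleanup path. */
static int
example_parse_adapter_and_port(const char *params)
{
	char *dup, *token;
	uint8_t adapter_id;
	uint16_t port_id;
	int ret = 0;

	dup = strdup(params);		/* resource that must be freed on error */
	if (dup == NULL)
		return -ENOMEM;

	token = strtok(dup, ",");
	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -EINVAL);
	adapter_id = atoi(token);
	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(adapter_id, -EINVAL);

	token = strtok(NULL, ",");
	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -EINVAL);
	port_id = atoi(token);
	RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(port_id, -EINVAL);

	/* ... use adapter_id and port_id ... */
	RTE_SET_USED(adapter_id);
	RTE_SET_USED(port_id);

	free(dup);
	return 0;

error:
	free(dup);
	return ret;
}
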
@@ -288,10 +336,9 @@ static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
  * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
  */
 static int
-rxa_wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
-        unsigned int n, int *cw,
-        struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
-        uint16_t gcd, int prev)
+rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw,
+            struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
+            uint16_t gcd, int prev)
 {
        int i = prev;
        uint16_t w;
@@ -396,10 +443,9 @@ rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
 /* Calculate nb_rx_intr after deleting interrupt mode rx queues
  */
 static void
-rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
-                       struct eth_device_info *dev_info,
-                       int rx_queue_id,
-                       uint32_t *nb_rx_intr)
+rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter,
+                         struct eth_device_info *dev_info, int rx_queue_id,
+                         uint32_t *nb_rx_intr)
 {
        uint32_t intr_diff;
 
@@ -415,12 +461,10 @@ rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
  * interrupt queues could currently be poll mode Rx queues
  */
 static void
-rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-                       struct eth_device_info *dev_info,
-                       int rx_queue_id,
-                       uint32_t *nb_rx_poll,
-                       uint32_t *nb_rx_intr,
-                       uint32_t *nb_wrr)
+rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter,
+                         struct eth_device_info *dev_info, int rx_queue_id,
+                         uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+                         uint32_t *nb_wrr)
 {
        uint32_t intr_diff;
        uint32_t poll_diff;
@@ -447,11 +491,9 @@ rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
  * after deleting poll mode rx queues
  */
 static void
-rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
-                       struct eth_device_info *dev_info,
-                       int rx_queue_id,
-                       uint32_t *nb_rx_poll,
-                       uint32_t *nb_wrr)
+rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter,
+                         struct eth_device_info *dev_info, int rx_queue_id,
+                         uint32_t *nb_rx_poll, uint32_t *nb_wrr)
 {
        uint32_t poll_diff;
        uint32_t wrr_len_diff;
@@ -472,13 +514,10 @@ rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
 /* Calculate nb_rx_* after adding poll mode rx queues
  */
 static void
-rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
-                       struct eth_device_info *dev_info,
-                       int rx_queue_id,
-                       uint16_t wt,
-                       uint32_t *nb_rx_poll,
-                       uint32_t *nb_rx_intr,
-                       uint32_t *nb_wrr)
+rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter,
+                         struct eth_device_info *dev_info, int rx_queue_id,
+                         uint16_t wt, uint32_t *nb_rx_poll,
+                         uint32_t *nb_rx_intr, uint32_t *nb_wrr)
 {
        uint32_t intr_diff;
        uint32_t poll_diff;
@@ -505,13 +544,10 @@ rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Calculate nb_rx_* after adding rx_queue_id */
 static void
-rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
-               struct eth_device_info *dev_info,
-               int rx_queue_id,
-               uint16_t wt,
-               uint32_t *nb_rx_poll,
-               uint32_t *nb_rx_intr,
-               uint32_t *nb_wrr)
+rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter,
+                    struct eth_device_info *dev_info, int rx_queue_id,
+                    uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+                    uint32_t *nb_wrr)
 {
        if (wt != 0)
                rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
@@ -523,12 +559,10 @@ rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Calculate nb_rx_* after deleting rx_queue_id */
 static void
-rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
-               struct eth_device_info *dev_info,
-               int rx_queue_id,
-               uint32_t *nb_rx_poll,
-               uint32_t *nb_rx_intr,
-               uint32_t *nb_wrr)
+rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter,
+                    struct eth_device_info *dev_info, int rx_queue_id,
+                    uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
+                    uint32_t *nb_wrr)
 {
        rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
                                nb_wrr);
@@ -540,8 +574,7 @@ rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
  * Allocate the rx_poll array
  */
 static struct eth_rx_poll_entry *
-rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
-       uint32_t num_rx_polled)
+rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled)
 {
        size_t len;
 
@@ -557,7 +590,7 @@ rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
  * Allocate the WRR array
  */
 static uint32_t *
-rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
+rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr)
 {
        size_t len;
 
@@ -570,11 +603,9 @@ rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
 }
 
 static int
-rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
-               uint32_t nb_poll,
-               uint32_t nb_wrr,
-               struct eth_rx_poll_entry **rx_poll,
-               uint32_t **wrr_sched)
+rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll,
+                     uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll,
+                     uint32_t **wrr_sched)
 {
 
        if (nb_poll == 0) {
@@ -599,9 +630,8 @@ rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Precalculate WRR polling sequence for all queues in rx_adapter */
 static void
-rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter,
-               struct eth_rx_poll_entry *rx_poll,
-               uint32_t *rx_wrr)
+rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter,
+                     struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr)
 {
        uint16_t d;
        uint16_t q;
@@ -728,13 +758,13 @@ rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
 }
 
 static inline int
-rxa_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter)
 {
        return !!rx_adapter->enq_block_count;
 }
 
 static inline void
-rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter)
 {
        if (rx_adapter->rx_enq_block_start_ts)
                return;
@@ -747,8 +777,8 @@ rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static inline void
-rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
-                   struct rte_event_eth_rx_adapter_stats *stats)
+rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
+                    struct rte_event_eth_rx_adapter_stats *stats)
 {
        if (unlikely(!stats->rx_enq_start_ts))
                stats->rx_enq_start_ts = rte_get_tsc_cycles();
@@ -767,24 +797,29 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Enqueue buffered events to event device */
 static inline uint16_t
-rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter,
+                      struct eth_event_enqueue_buffer *buf,
+                      struct rte_event_eth_rx_adapter_stats *stats)
 {
-       struct rte_eth_event_enqueue_buffer *buf =
-           &rx_adapter->event_enqueue_buffer;
-       struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
-       uint16_t count = buf->last ? buf->last - buf->head : buf->count;
+       uint16_t count = buf->count;
+       uint16_t n = 0;
 
        if (!count)
                return 0;
 
-       uint16_t n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
-                                       rx_adapter->event_port_id,
-                                       &buf->events[buf->head],
-                                       count);
-       if (n != count)
-               stats->rx_enq_retry++;
+       if (buf->last)
+               count = buf->last - buf->head;
 
-       buf->head += n;
+       if (count) {
+               n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
+                                               rx_adapter->event_port_id,
+                                               &buf->events[buf->head],
+                                               count);
+               if (n != count)
+                       stats->rx_enq_retry++;
+
+               buf->head += n;
+       }
 
        if (buf->last && n == count) {
                uint16_t n1;
@@ -813,7 +848,7 @@ rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static inline void
-rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_init_vector(struct event_eth_rx_adapter *rx_adapter,
                struct eth_rx_vector_data *vec)
 {
        vec->vector_ev->nb_elem = 0;
@@ -824,9 +859,9 @@ rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline uint16_t
-rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
                        struct eth_rx_queue_info *queue_info,
-                       struct rte_eth_event_enqueue_buffer *buf,
+                       struct eth_event_enqueue_buffer *buf,
                        struct rte_mbuf **mbufs, uint16_t num)
 {
        struct rte_event *ev = &buf->events[buf->count];
@@ -884,19 +919,16 @@ rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline void
-rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
-               uint16_t eth_dev_id,
-               uint16_t rx_queue_id,
-               struct rte_mbuf **mbufs,
-               uint16_t num)
+rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+                uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num,
+                struct eth_event_enqueue_buffer *buf,
+                struct rte_event_eth_rx_adapter_stats *stats)
 {
        uint32_t i;
        struct eth_device_info *dev_info =
                                        &rx_adapter->eth_devices[eth_dev_id];
        struct eth_rx_queue_info *eth_rx_queue_info =
                                        &dev_info->rx_queue[rx_queue_id];
-       struct rte_eth_event_enqueue_buffer *buf =
-                                       &rx_adapter->event_enqueue_buffer;
        uint16_t new_tail = buf->tail;
        uint64_t event = eth_rx_queue_info->event;
        uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
@@ -912,14 +944,14 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
                ts = m->ol_flags & event_eth_rx_timestamp_dynflag ?
                                                0 : rte_get_tsc_cycles();
 
-               /* 0xffff ffff ffff ffff if PKT_RX_TIMESTAMP is set,
+               /* 0xffff ffff ffff ffff if RTE_MBUF_F_RX_TIMESTAMP is set,
                 * otherwise 0
                 */
                ts_mask = (uint64_t)(!(m->ol_flags &
                                       event_eth_rx_timestamp_dynflag)) - 1ULL;
 
-               /* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */
-               rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);
+               /* 0xffff ffff if RTE_MBUF_F_RX_RSS_HASH is set, otherwise 0 */
+               rss_mask = ~(((m->ol_flags & RTE_MBUF_F_RX_RSS_HASH) != 0) - 1);
                do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
                for (i = 0; i < num; i++) {
                        struct rte_event *ev;
@@ -961,7 +993,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
                else
                        num = nb_cb;
                if (dropped)
-                       rx_adapter->stats.rx_dropped += dropped;
+                       stats->rx_dropped += dropped;
        }
 
        buf->count += num;
@@ -969,7 +1001,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline bool
-rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf)
+rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf)
 {
        uint32_t nb_req = buf->tail + BATCH_SIZE;
 
@@ -990,20 +1022,15 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf)
 
 /* Enqueue packets from  <port, q>  to event buffer */
 static inline uint32_t
-rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
-       uint16_t port_id,
-       uint16_t queue_id,
-       uint32_t rx_count,
-       uint32_t max_rx,
-       int *rxq_empty)
+rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
+          uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
+          int *rxq_empty, struct eth_event_enqueue_buffer *buf,
+          struct rte_event_eth_rx_adapter_stats *stats)
 {
        struct rte_mbuf *mbufs[BATCH_SIZE];
-       struct rte_eth_event_enqueue_buffer *buf =
-                                       &rx_adapter->event_enqueue_buffer;
-       struct rte_event_eth_rx_adapter_stats *stats =
-                                       &rx_adapter->stats;
        uint16_t n;
        uint32_t nb_rx = 0;
+       uint32_t nb_flushed = 0;
 
        if (rxq_empty)
                *rxq_empty = 0;
@@ -1012,7 +1039,8 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
         */
        while (rxa_pkt_buf_available(buf)) {
                if (buf->count >= BATCH_SIZE)
-                       rxa_flush_event_buffer(rx_adapter);
+                       nb_flushed +=
+                               rxa_flush_event_buffer(rx_adapter, buf, stats);
 
                stats->rx_poll_count++;
                n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
@@ -1021,21 +1049,26 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
                                *rxq_empty = 1;
                        break;
                }
-               rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n);
+               rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf,
+                                stats);
                nb_rx += n;
                if (rx_count + nb_rx > max_rx)
                        break;
        }
 
        if (buf->count > 0)
-               rxa_flush_event_buffer(rx_adapter);
+               nb_flushed += rxa_flush_event_buffer(rx_adapter, buf, stats);
+
+       stats->rx_packets += nb_rx;
+       if (nb_flushed == 0)
+               rte_event_maintain(rx_adapter->eventdev_id,
+                                  rx_adapter->event_port_id, 0);
 
        return nb_rx;
 }
 
 static inline void
-rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
-               void *data)
+rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data)
 {
        uint16_t port_id;
        uint16_t queue;
@@ -1075,8 +1108,8 @@ rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
-                       uint32_t num_intr_vec)
+rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter,
+                         uint32_t num_intr_vec)
 {
        if (rx_adapter->num_intr_vec + num_intr_vec >
                                RTE_EVENT_ETH_INTR_RING_SIZE) {
@@ -1091,9 +1124,9 @@ rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
 
 /* Delete entries for (dev, queue) from the interrupt ring */
 static void
-rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
-                       struct eth_device_info *dev_info,
-                       uint16_t rx_queue_id)
+rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter,
+                         struct eth_device_info *dev_info,
+                         uint16_t rx_queue_id)
 {
        int i, n;
        union queue_data qd;
@@ -1126,7 +1159,7 @@ rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
 static void *
 rxa_intr_thread(void *arg)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter = arg;
+       struct event_eth_rx_adapter *rx_adapter = arg;
        struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
        int n, i;
 
@@ -1148,28 +1181,30 @@ rxa_intr_thread(void *arg)
 /* Dequeue <port, q> from interrupt ring and enqueue received
  * mbufs to eventdev
  */
-static inline uint32_t
-rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
+static inline void
+rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
 {
        uint32_t n;
        uint32_t nb_rx = 0;
        int rxq_empty;
-       struct rte_eth_event_enqueue_buffer *buf;
+       struct eth_event_enqueue_buffer *buf;
+       struct rte_event_eth_rx_adapter_stats *stats;
        rte_spinlock_t *ring_lock;
        uint8_t max_done = 0;
 
        if (rx_adapter->num_rx_intr == 0)
-               return 0;
+               return;
 
        if (rte_ring_count(rx_adapter->intr_ring) == 0
                && !rx_adapter->qd_valid)
-               return 0;
+               return;
 
        buf = &rx_adapter->event_enqueue_buffer;
+       stats = &rx_adapter->stats;
        ring_lock = &rx_adapter->intr_ring_lock;
 
        if (buf->count >= BATCH_SIZE)
-               rxa_flush_event_buffer(rx_adapter);
+               rxa_flush_event_buffer(rx_adapter, buf, stats);
 
        while (rxa_pkt_buf_available(buf)) {
                struct eth_device_info *dev_info;
@@ -1221,7 +1256,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
                                        continue;
                                n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
                                        rx_adapter->max_nb_rx,
-                                       &rxq_empty);
+                                       &rxq_empty, buf, stats);
                                nb_rx += n;
 
                                enq_buffer_full = !rxq_empty && n == 0;
@@ -1242,7 +1277,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
                } else {
                        n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
                                rx_adapter->max_nb_rx,
-                               &rxq_empty);
+                               &rxq_empty, buf, stats);
                        rx_adapter->qd_valid = !rxq_empty;
                        nb_rx += n;
                        if (nb_rx > rx_adapter->max_nb_rx)
@@ -1252,7 +1287,6 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
 
 done:
        rx_adapter->stats.rx_intr_packets += nb_rx;
-       return nb_rx;
 }
 
 /*
@@ -1268,18 +1302,18 @@ done:
  * the hypervisor's switching layer where adjustments can be made to deal with
  * it.
  */
-static inline uint32_t
-rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
+static inline void
+rxa_poll(struct event_eth_rx_adapter *rx_adapter)
 {
        uint32_t num_queue;
        uint32_t nb_rx = 0;
-       struct rte_eth_event_enqueue_buffer *buf;
+       struct eth_event_enqueue_buffer *buf = NULL;
+       struct rte_event_eth_rx_adapter_stats *stats = NULL;
        uint32_t wrr_pos;
        uint32_t max_nb_rx;
 
        wrr_pos = rx_adapter->wrr_pos;
        max_nb_rx = rx_adapter->max_nb_rx;
-       buf = &rx_adapter->event_enqueue_buffer;
 
        /* Iterate through a WRR sequence */
        for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
@@ -1287,40 +1321,48 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
                uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
                uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
 
+               buf = rxa_event_buf_get(rx_adapter, d, qid, &stats);
+
                /* Don't do a batch dequeue from the rx queue if there isn't
                 * enough space in the enqueue buffer.
                 */
                if (buf->count >= BATCH_SIZE)
-                       rxa_flush_event_buffer(rx_adapter);
+                       rxa_flush_event_buffer(rx_adapter, buf, stats);
                if (!rxa_pkt_buf_available(buf)) {
-                       rx_adapter->wrr_pos = wrr_pos;
-                       return nb_rx;
+                       if (rx_adapter->use_queue_event_buf)
+                               goto poll_next_entry;
+                       else {
+                               rx_adapter->wrr_pos = wrr_pos;
+                               return;
+                       }
                }
 
                nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
-                               NULL);
+                               NULL, buf, stats);
                if (nb_rx > max_nb_rx) {
                        rx_adapter->wrr_pos =
                                    (wrr_pos + 1) % rx_adapter->wrr_len;
                        break;
                }
 
+poll_next_entry:
                if (++wrr_pos == rx_adapter->wrr_len)
                        wrr_pos = 0;
        }
-       return nb_rx;
 }
 
 static void
 rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter = arg;
-       struct rte_eth_event_enqueue_buffer *buf =
-               &rx_adapter->event_enqueue_buffer;
+       struct event_eth_rx_adapter *rx_adapter = arg;
+       struct eth_event_enqueue_buffer *buf = NULL;
+       struct rte_event_eth_rx_adapter_stats *stats = NULL;
        struct rte_event *ev;
 
+       buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue, &stats);
+
        if (buf->count)
-               rxa_flush_event_buffer(rx_adapter);
+               rxa_flush_event_buffer(rx_adapter, buf, stats);
 
        if (vec->vector_ev->nb_elem == 0)
                return;
@@ -1338,8 +1380,7 @@ rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
 static int
 rxa_service_func(void *args)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter = args;
-       struct rte_event_eth_rx_adapter_stats *stats;
+       struct event_eth_rx_adapter *rx_adapter = args;
 
        if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
                return 0;
@@ -1366,10 +1407,11 @@ rxa_service_func(void *args)
                }
        }
 
-       stats = &rx_adapter->stats;
-       stats->rx_packets += rxa_intr_ring_dequeue(rx_adapter);
-       stats->rx_packets += rxa_poll(rx_adapter);
+       rxa_intr_ring_dequeue(rx_adapter);
+       rxa_poll(rx_adapter);
+
        rte_spinlock_unlock(&rx_adapter->rx_lock);
+
        return 0;
 }
 
@@ -1414,7 +1456,7 @@ rxa_memzone_lookup(void)
        return 0;
 }
 
-static inline struct rte_event_eth_rx_adapter *
+static inline struct event_eth_rx_adapter *
 rxa_id_to_adapter(uint8_t id)
 {
        return event_eth_rx_adapter ?
@@ -1431,7 +1473,7 @@ rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
        int started;
        uint8_t port_id;
        struct rte_event_port_conf *port_conf = arg;
-       struct rte_event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
+       struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
 
        dev = &rte_eventdevs[rx_adapter->eventdev_id];
        dev_conf = dev->data->dev_conf;
@@ -1480,7 +1522,7 @@ rxa_epoll_create1(void)
 }
 
 static int
-rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_init_epd(struct event_eth_rx_adapter *rx_adapter)
 {
        if (rx_adapter->epd != INIT_FD)
                return 0;
@@ -1497,7 +1539,7 @@ rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
 {
        int err;
        char thread_name[RTE_MAX_THREAD_NAME_LEN];
@@ -1541,7 +1583,7 @@ error:
 }
 
 static int
-rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
 {
        int err;
 
@@ -1562,7 +1604,7 @@ rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter)
 {
        int ret;
 
@@ -1580,9 +1622,8 @@ rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
 }
 
 static int
-rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-       struct eth_device_info *dev_info,
-       uint16_t rx_queue_id)
+rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter,
+                struct eth_device_info *dev_info, uint16_t rx_queue_id)
 {
        int err;
        uint16_t eth_dev_id = dev_info->dev->data->port_id;
@@ -1610,9 +1651,8 @@ rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-               struct eth_device_info *dev_info,
-               int rx_queue_id)
+rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
+                  struct eth_device_info *dev_info, int rx_queue_id)
 {
        int err;
        int i;
@@ -1669,9 +1709,8 @@ rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static int
-rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
-       struct eth_device_info *dev_info,
-       uint16_t rx_queue_id)
+rxa_config_intr(struct event_eth_rx_adapter *rx_adapter,
+               struct eth_device_info *dev_info, uint16_t rx_queue_id)
 {
        int err, err1;
        uint16_t eth_dev_id = dev_info->dev->data->port_id;
@@ -1759,9 +1798,8 @@ err_free_queue:
 }
 
 static int
-rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-       struct eth_device_info *dev_info,
-       int rx_queue_id)
+rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
+                  struct eth_device_info *dev_info, int rx_queue_id)
 
 {
        int i, j, err;
@@ -1809,9 +1847,8 @@ rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
        return err;
 }
 
-
 static int
-rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
+rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
 {
        int ret;
        struct rte_service_spec service;
@@ -1854,10 +1891,9 @@ err_done:
 }
 
 static void
-rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-               struct eth_device_info *dev_info,
-               int32_t rx_queue_id,
-               uint8_t add)
+rxa_update_queue(struct event_eth_rx_adapter *rx_adapter,
+                struct eth_device_info *dev_info, int32_t rx_queue_id,
+                uint8_t add)
 {
        struct eth_rx_queue_info *queue_info;
        int enabled;
@@ -1907,9 +1943,8 @@ rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
 }
 
 static void
-rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
-       struct eth_device_info *dev_info,
-       int32_t rx_queue_id)
+rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
+          struct eth_device_info *dev_info, int32_t rx_queue_id)
 {
        struct eth_rx_vector_data *vec;
        int pollq;
@@ -1947,13 +1982,23 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
        rx_adapter->num_rx_intr -= intrq;
        dev_info->nb_rx_intr -= intrq;
        dev_info->nb_shared_intr -= intrq && sintrq;
+       if (rx_adapter->use_queue_event_buf) {
+               struct eth_event_enqueue_buffer *event_buf =
+                       dev_info->rx_queue[rx_queue_id].event_buf;
+               struct rte_event_eth_rx_adapter_stats *stats =
+                       dev_info->rx_queue[rx_queue_id].stats;
+               rte_free(event_buf->events);
+               rte_free(event_buf);
+               rte_free(stats);
+               dev_info->rx_queue[rx_queue_id].event_buf = NULL;
+               dev_info->rx_queue[rx_queue_id].stats = NULL;
+       }
 }
 
-static void
-rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
-       struct eth_device_info *dev_info,
-       int32_t rx_queue_id,
-       const struct rte_event_eth_rx_adapter_queue_conf *conf)
+static int
+rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
+             struct eth_device_info *dev_info, int32_t rx_queue_id,
+             const struct rte_event_eth_rx_adapter_queue_conf *conf)
 {
        struct eth_rx_queue_info *queue_info;
        const struct rte_event *ev = &conf->ev;
@@ -1961,15 +2006,22 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
        int intrq;
        int sintrq;
        struct rte_event *qi_ev;
+       struct eth_event_enqueue_buffer *new_rx_buf = NULL;
+       struct rte_event_eth_rx_adapter_stats *stats = NULL;
+       uint16_t eth_dev_id = dev_info->dev->data->port_id;
+       int ret;
 
        if (rx_queue_id == -1) {
                uint16_t nb_rx_queues;
                uint16_t i;
 
                nb_rx_queues = dev_info->dev->data->nb_rx_queues;
-               for (i = 0; i < nb_rx_queues; i++)
-                       rxa_add_queue(rx_adapter, dev_info, i, conf);
-               return;
+               for (i = 0; i < nb_rx_queues; i++) {
+                       ret = rxa_add_queue(rx_adapter, dev_info, i, conf);
+                       if (ret)
+                               return ret;
+               }
+               return 0;
        }
 
        pollq = rxa_polled_queue(dev_info, rx_queue_id);
@@ -2032,12 +2084,58 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
                                dev_info->next_q_idx = 0;
                }
        }
+
+       if (!rx_adapter->use_queue_event_buf)
+               return 0;
+
+       new_rx_buf = rte_zmalloc_socket("rx_buffer_meta",
+                               sizeof(*new_rx_buf), 0,
+                               rte_eth_dev_socket_id(eth_dev_id));
+       if (new_rx_buf == NULL) {
+               RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for "
+                                "dev_id: %d queue_id: %d",
+                                eth_dev_id, rx_queue_id);
+               return -ENOMEM;
+       }
+
+       new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE);
+       new_rx_buf->events_size += (2 * BATCH_SIZE);
+       new_rx_buf->events = rte_zmalloc_socket("rx_buffer",
+                               sizeof(struct rte_event) *
+                               new_rx_buf->events_size, 0,
+                               rte_eth_dev_socket_id(eth_dev_id));
+       if (new_rx_buf->events == NULL) {
+               rte_free(new_rx_buf);
+               RTE_EDEV_LOG_ERR("Failed to allocate event buffer for "
+                                "dev_id: %d queue_id: %d",
+                                eth_dev_id, rx_queue_id);
+               return -ENOMEM;
+       }
+
+       queue_info->event_buf = new_rx_buf;
+
+       /* Allocate storage for adapter queue stats */
+       stats = rte_zmalloc_socket("rx_queue_stats",
+                               sizeof(*stats), 0,
+                               rte_eth_dev_socket_id(eth_dev_id));
+       if (stats == NULL) {
+               rte_free(new_rx_buf->events);
+               rte_free(new_rx_buf);
+               RTE_EDEV_LOG_ERR("Failed to allocate stats storage for"
+                                " dev_id: %d queue_id: %d",
+                                eth_dev_id, rx_queue_id);
+               return -ENOMEM;
+       }
+
+       queue_info->stats = stats;
+
+       return 0;
 }
 
-static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
-               uint16_t eth_dev_id,
-               int rx_queue_id,
-               const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+static int
+rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
+          int rx_queue_id,
+          const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
        struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
        struct rte_event_eth_rx_adapter_queue_conf temp_conf;
@@ -2060,6 +2158,16 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
                        temp_conf.servicing_weight = 1;
                }
                queue_conf = &temp_conf;
+
+               if (queue_conf->servicing_weight == 0 &&
+                   rx_adapter->use_queue_event_buf) {
+
+                       RTE_EDEV_LOG_ERR("Use of queue level event buffer "
+                                        "not supported for interrupt queues "
+                                        "dev_id: %d queue_id: %d",
+                                        eth_dev_id, rx_queue_id);
+                       return -EINVAL;
+               }
        }
 
        nb_rx_queues = dev_info->dev->data->nb_rx_queues;
@@ -2139,7 +2247,9 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
 
 
 
-       rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
+       ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
+       if (ret)
+               goto err_free_rxqueue;
        rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
 
        rte_free(rx_adapter->eth_rx_poll);
@@ -2160,13 +2270,13 @@ err_free_rxqueue:
        rte_free(rx_poll);
        rte_free(rx_wrr);
 
-       return 0;
+       return ret;
 }
 
 static int
 rxa_ctrl(uint8_t id, int start)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter;
+       struct event_eth_rx_adapter *rx_adapter;
        struct rte_eventdev *dev;
        struct eth_device_info *dev_info;
        uint32_t i;
@@ -2214,8 +2324,8 @@ rxa_create(uint8_t id, uint8_t dev_id,
           rte_event_eth_rx_adapter_conf_cb conf_cb,
           void *conf_arg)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter;
-       struct rte_eth_event_enqueue_buffer *buf;
+       struct event_eth_rx_adapter *rx_adapter;
+       struct eth_event_enqueue_buffer *buf;
        struct rte_event *events;
        int ret;
        int socket_id;
@@ -2286,20 +2396,25 @@ rxa_create(uint8_t id, uint8_t dev_id,
                rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
 
        /* Rx adapter event buffer allocation */
-       buf = &rx_adapter->event_enqueue_buffer;
-       buf->events_size = rxa_params->event_buf_size;
-
-       events = rte_zmalloc_socket(rx_adapter->mem_name,
-                                   buf->events_size * sizeof(*events),
-                                   0, socket_id);
-       if (events == NULL) {
-               RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n");
-               rte_free(rx_adapter->eth_devices);
-               rte_free(rx_adapter);
-               return -ENOMEM;
-       }
+       rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf;
+
+       if (!rx_adapter->use_queue_event_buf) {
+               buf = &rx_adapter->event_enqueue_buffer;
+               buf->events_size = rxa_params->event_buf_size;
+
+               events = rte_zmalloc_socket(rx_adapter->mem_name,
+                                           buf->events_size * sizeof(*events),
+                                           0, socket_id);
+               if (events == NULL) {
+                       RTE_EDEV_LOG_ERR("Failed to allocate memory "
+                                        "for adapter event buffer");
+                       rte_free(rx_adapter->eth_devices);
+                       rte_free(rx_adapter);
+                       return -ENOMEM;
+               }
 
-       rx_adapter->event_enqueue_buffer.events = events;
+               rx_adapter->event_enqueue_buffer.events = events;
+       }
 
        event_eth_rx_adapter[id] = rx_adapter;
 
@@ -2327,6 +2442,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
 
        /* use default values for adapter params */
        rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE;
+       rxa_params.use_queue_event_buf = false;
 
        return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg);
 }
@@ -2343,14 +2459,27 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id,
        if (port_config == NULL)
                return -EINVAL;
 
-       /* use default values if rxa_params is NULL */
        if (rxa_params == NULL) {
+               /* use default values if rxa_params is NULL */
                rxa_params = &temp_params;
                rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE;
-       }
-
-       if (rxa_params->event_buf_size == 0)
+               rxa_params->use_queue_event_buf = false;
+       } else if ((!rxa_params->use_queue_event_buf &&
+                   rxa_params->event_buf_size == 0) ||
+                  (rxa_params->use_queue_event_buf &&
+                   rxa_params->event_buf_size != 0)) {
+               RTE_EDEV_LOG_ERR("Invalid adapter params\n");
                return -EINVAL;
+       } else if (!rxa_params->use_queue_event_buf) {
+               /* Adjust the event buffer size by BATCH_SIZE (used when
+                * fetching packets from the NIC Rx queues) to get full
+                * buffer utilization and prevent unnecessary rollovers.
+                */
+
+               rxa_params->event_buf_size =
+                       RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE);
+               rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE);
+       }
 
        pc = rte_malloc(NULL, sizeof(*pc), 0);
        if (pc == NULL)
@@ -2358,14 +2487,6 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id,
 
        *pc = *port_config;
 
-       /* adjust event buff size with BATCH_SIZE used for fetching packets
-        * from NIC rx queues to get full buffer utilization and prevent
-        * unnecessary rollovers.
-        */
-       rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size,
-                                              BATCH_SIZE);
-       rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE);
-
        ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc);
        if (ret)
                rte_free(pc);
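
With the shared (adapter-level) buffer, the requested size is first rounded up to a multiple of BATCH_SIZE and then grown by two extra batches, so the service function can always pull a full burst from the NIC without wrapping mid-batch. A minimal usage sketch follows; the IDs and the requested size of 1024 are illustrative assumptions (with BATCH_SIZE = 32, 1024 is already aligned, so the internal buffer ends up holding 1024 + 64 = 1088 events).

/* Sketch: adapter with one shared event enqueue buffer (example values). */
struct rte_event_port_conf port_conf = {
	.new_event_threshold = 4096,
	.dequeue_depth = 128,
	.enqueue_depth = 128,
};
struct rte_event_eth_rx_adapter_params params = {
	.event_buf_size = 1024,		/* internally becomes 1024 + 2 * BATCH_SIZE */
	.use_queue_event_buf = false,
};

if (rte_event_eth_rx_adapter_create_with_params(0 /* adapter id */,
						 0 /* event dev id */,
						 &port_conf, &params))
	rte_exit(EXIT_FAILURE, "Rx adapter create failed\n");
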
@@ -2401,7 +2522,7 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
 int
 rte_event_eth_rx_adapter_free(uint8_t id)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter;
+       struct event_eth_rx_adapter *rx_adapter;
 
        RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 
@@ -2418,7 +2539,8 @@ rte_event_eth_rx_adapter_free(uint8_t id)
        if (rx_adapter->default_cb_arg)
                rte_free(rx_adapter->conf_arg);
        rte_free(rx_adapter->eth_devices);
-       rte_free(rx_adapter->event_enqueue_buffer.events);
+       if (!rx_adapter->use_queue_event_buf)
+               rte_free(rx_adapter->event_enqueue_buffer.events);
        rte_free(rx_adapter);
        event_eth_rx_adapter[id] = NULL;
 
@@ -2434,7 +2556,7 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
 {
        int ret;
        uint32_t cap;
-       struct rte_event_eth_rx_adapter *rx_adapter;
+       struct event_eth_rx_adapter *rx_adapter;
        struct rte_eventdev *dev;
        struct eth_device_info *dev_info;
        struct rte_event_eth_rx_adapter_vector_limits limits;
@@ -2522,6 +2644,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
                return -EINVAL;
        }
 
+       if ((rx_adapter->use_queue_event_buf &&
+            queue_conf->event_buf_size == 0) ||
+           (!rx_adapter->use_queue_event_buf &&
+            queue_conf->event_buf_size != 0)) {
+               RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue");
+               return -EINVAL;
+       }
+
        dev_info = &rx_adapter->eth_devices[eth_dev_id];
 
        if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
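
With per-queue buffers the validation above is inverted: event_buf_size in the adapter params must be zero, and every queue added must request a non-zero buffer of its own (interrupt-mode queues, i.e. a servicing_weight of 0, are rejected in this mode). A sketch of that flow, with illustrative IDs and sizes:

/* Sketch: per-Rx-queue event buffers (example values only). */
struct rte_event_port_conf port_conf = {
	.new_event_threshold = 4096,
	.dequeue_depth = 128,
	.enqueue_depth = 128,
};
struct rte_event_eth_rx_adapter_params params = {
	.event_buf_size = 0,		/* must be 0 when use_queue_event_buf is set */
	.use_queue_event_buf = true,
};
struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
	.servicing_weight = 1,		/* 0 (interrupt mode) is not supported here */
	.ev = {
		.queue_id = 0,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.sched_type = RTE_SCHED_TYPE_ATOMIC,
	},
	.event_buf_size = 512,		/* per-queue size, must be non-zero */
};

if (rte_event_eth_rx_adapter_create_with_params(0, 0, &port_conf, &params) ||
    rte_event_eth_rx_adapter_queue_add(0 /* adapter */, 0 /* eth port */,
				       0 /* rx queue */, &queue_conf))
	rte_exit(EXIT_FAILURE, "per-queue Rx adapter setup failed\n");
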
@@ -2586,7 +2716,7 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
 {
        int ret = 0;
        struct rte_eventdev *dev;
-       struct rte_event_eth_rx_adapter *rx_adapter;
+       struct event_eth_rx_adapter *rx_adapter;
        struct eth_device_info *dev_info;
        uint32_t cap;
        uint32_t nb_rx_poll = 0;
@@ -2677,6 +2807,11 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
                rx_adapter->eth_rx_poll = rx_poll;
                rx_adapter->wrr_sched = rx_wrr;
                rx_adapter->wrr_len = nb_wrr;
+               /*
+                * reset next poll start position (wrr_pos) to avoid buffer
+                * overrun when wrr_len is reduced in case of queue delete
+                */
+               rx_adapter->wrr_pos = 0;
                rx_adapter->num_intr_vec += num_intr_vec;
 
                if (dev_info->nb_dev_queues == 0) {
@@ -2752,16 +2887,28 @@ rte_event_eth_rx_adapter_stop(uint8_t id)
        return rxa_ctrl(id, 0);
 }
 
+static inline void
+rxa_queue_stats_reset(struct eth_rx_queue_info *queue_info)
+{
+       struct rte_event_eth_rx_adapter_stats *q_stats;
+
+       q_stats = queue_info->stats;
+       memset(q_stats, 0, sizeof(*q_stats));
+}
+
 int
 rte_event_eth_rx_adapter_stats_get(uint8_t id,
                               struct rte_event_eth_rx_adapter_stats *stats)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter;
+       struct event_eth_rx_adapter *rx_adapter;
+       struct eth_event_enqueue_buffer *buf;
        struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
        struct rte_event_eth_rx_adapter_stats dev_stats;
        struct rte_eventdev *dev;
        struct eth_device_info *dev_info;
-       uint32_t i;
+       struct eth_rx_queue_info *queue_info;
+       struct rte_event_eth_rx_adapter_stats *q_stats;
+       uint32_t i, j;
        int ret;
 
        if (rxa_memzone_lookup())
@@ -2775,8 +2922,32 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 
        dev = &rte_eventdevs[rx_adapter->eventdev_id];
        memset(stats, 0, sizeof(*stats));
+
+       if (rx_adapter->service_inited)
+               *stats = rx_adapter->stats;
+
        RTE_ETH_FOREACH_DEV(i) {
                dev_info = &rx_adapter->eth_devices[i];
+
+               if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) {
+
+                       for (j = 0; j < dev_info->dev->data->nb_rx_queues;
+                            j++) {
+                               queue_info = &dev_info->rx_queue[j];
+                               if (!queue_info->queue_enabled)
+                                       continue;
+                               q_stats = queue_info->stats;
+
+                               stats->rx_packets += q_stats->rx_packets;
+                               stats->rx_poll_count += q_stats->rx_poll_count;
+                               stats->rx_enq_count += q_stats->rx_enq_count;
+                               stats->rx_enq_retry += q_stats->rx_enq_retry;
+                               stats->rx_dropped += q_stats->rx_dropped;
+                               stats->rx_enq_block_cycles +=
+                                               q_stats->rx_enq_block_cycles;
+                       }
+               }
+
                if (dev_info->internal_event_port == 0 ||
                        dev->dev_ops->eth_rx_adapter_stats_get == NULL)
                        continue;
@@ -2789,11 +2960,72 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
                dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
        }
 
-       if (rx_adapter->service_inited)
-               *stats = rx_adapter->stats;
-
+       buf = &rx_adapter->event_enqueue_buffer;
        stats->rx_packets += dev_stats_sum.rx_packets;
        stats->rx_enq_count += dev_stats_sum.rx_enq_count;
+       stats->rx_event_buf_count = buf->count;
+       stats->rx_event_buf_size = buf->events_size;
+
+       return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
+               uint16_t eth_dev_id,
+               uint16_t rx_queue_id,
+               struct rte_event_eth_rx_adapter_queue_stats *stats)
+{
+       struct event_eth_rx_adapter *rx_adapter;
+       struct eth_device_info *dev_info;
+       struct eth_rx_queue_info *queue_info;
+       struct eth_event_enqueue_buffer *event_buf;
+       struct rte_event_eth_rx_adapter_stats *q_stats;
+       struct rte_eventdev *dev;
+
+       if (rxa_memzone_lookup())
+               return -ENOMEM;
+
+       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+       rx_adapter = rxa_id_to_adapter(id);
+
+       if (rx_adapter == NULL || stats == NULL)
+               return -EINVAL;
+
+       if (!rx_adapter->use_queue_event_buf)
+               return -EINVAL;
+
+       if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+               RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
+               return -EINVAL;
+       }
+
+       dev_info = &rx_adapter->eth_devices[eth_dev_id];
+       if (dev_info->rx_queue == NULL ||
+           !dev_info->rx_queue[rx_queue_id].queue_enabled) {
+               RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
+               return -EINVAL;
+       }
+
+       if (dev_info->internal_event_port == 0) {
+               queue_info = &dev_info->rx_queue[rx_queue_id];
+               event_buf = queue_info->event_buf;
+               q_stats = queue_info->stats;
+
+               stats->rx_event_buf_count = event_buf->count;
+               stats->rx_event_buf_size = event_buf->events_size;
+               stats->rx_packets = q_stats->rx_packets;
+               stats->rx_poll_count = q_stats->rx_poll_count;
+               stats->rx_dropped = q_stats->rx_dropped;
+       }
+
+       dev = &rte_eventdevs[rx_adapter->eventdev_id];
+       if (dev->dev_ops->eth_rx_adapter_queue_stats_get != NULL) {
+               return (*dev->dev_ops->eth_rx_adapter_queue_stats_get)(dev,
+                                               &rte_eth_devices[eth_dev_id],
+                                               rx_queue_id, stats);
+       }
 
        return 0;
 }
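
rte_event_eth_rx_adapter_queue_stats_get() only reports the software counters when the adapter uses per-queue event buffers (it returns -EINVAL otherwise) and falls through to the PMD op for internal-port devices. A small sketch of reading and resetting one queue's counters (IDs are illustrative; needs <stdio.h> and <inttypes.h>):

/* Sketch: read and reset per-queue Rx adapter statistics. */
struct rte_event_eth_rx_adapter_queue_stats qstats;

if (rte_event_eth_rx_adapter_queue_stats_get(0 /* adapter */, 0 /* eth port */,
					     0 /* rx queue */, &qstats) == 0) {
	printf("q0: packets=%" PRIu64 " polls=%" PRIu64 " dropped=%" PRIu64
	       " buffered=%" PRIu64 "/%" PRIu64 "\n",
	       (uint64_t)qstats.rx_packets, (uint64_t)qstats.rx_poll_count,
	       (uint64_t)qstats.rx_dropped, (uint64_t)qstats.rx_event_buf_count,
	       (uint64_t)qstats.rx_event_buf_size);
	rte_event_eth_rx_adapter_queue_stats_reset(0, 0, 0);
}
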
@@ -2801,10 +3033,11 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
 int
 rte_event_eth_rx_adapter_stats_reset(uint8_t id)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter;
+       struct event_eth_rx_adapter *rx_adapter;
        struct rte_eventdev *dev;
        struct eth_device_info *dev_info;
-       uint32_t i;
+       struct eth_rx_queue_info *queue_info;
+       uint32_t i, j;
 
        if (rxa_memzone_lookup())
                return -ENOMEM;
@@ -2816,8 +3049,21 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
                return -EINVAL;
 
        dev = &rte_eventdevs[rx_adapter->eventdev_id];
+
        RTE_ETH_FOREACH_DEV(i) {
                dev_info = &rx_adapter->eth_devices[i];
+
+               if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) {
+
+                       for (j = 0; j < dev_info->dev->data->nb_rx_queues;
+                                               j++) {
+                               queue_info = &dev_info->rx_queue[j];
+                               if (!queue_info->queue_enabled)
+                                       continue;
+                               rxa_queue_stats_reset(queue_info);
+                       }
+               }
+
                if (dev_info->internal_event_port == 0 ||
                        dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
                        continue;
@@ -2826,13 +3072,65 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
        }
 
        memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
+
+       return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
+               uint16_t eth_dev_id,
+               uint16_t rx_queue_id)
+{
+       struct event_eth_rx_adapter *rx_adapter;
+       struct eth_device_info *dev_info;
+       struct eth_rx_queue_info *queue_info;
+       struct rte_eventdev *dev;
+
+       if (rxa_memzone_lookup())
+               return -ENOMEM;
+
+       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+       rx_adapter = rxa_id_to_adapter(id);
+       if (rx_adapter == NULL)
+               return -EINVAL;
+
+       if (!rx_adapter->use_queue_event_buf)
+               return -EINVAL;
+
+       if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+               RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
+               return -EINVAL;
+       }
+
+       dev_info = &rx_adapter->eth_devices[eth_dev_id];
+
+       if (dev_info->rx_queue == NULL ||
+           !dev_info->rx_queue[rx_queue_id].queue_enabled) {
+               RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
+               return -EINVAL;
+       }
+
+       if (dev_info->internal_event_port == 0) {
+               queue_info = &dev_info->rx_queue[rx_queue_id];
+               rxa_queue_stats_reset(queue_info);
+       }
+
+       dev = &rte_eventdevs[rx_adapter->eventdev_id];
+       if (dev->dev_ops->eth_rx_adapter_queue_stats_reset != NULL) {
+               return (*dev->dev_ops->eth_rx_adapter_queue_stats_reset)(dev,
+                                               &rte_eth_devices[eth_dev_id],
+                                               rx_queue_id);
+       }
+
        return 0;
 }
 
 int
 rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter;
+       struct event_eth_rx_adapter *rx_adapter;
 
        if (rxa_memzone_lookup())
                return -ENOMEM;
@@ -2849,13 +3147,33 @@ rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
        return rx_adapter->service_inited ? 0 : -ESRCH;
 }
 
+int
+rte_event_eth_rx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
+{
+       struct event_eth_rx_adapter *rx_adapter;
+
+       if (rxa_memzone_lookup())
+               return -ENOMEM;
+
+       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+       rx_adapter = rxa_id_to_adapter(id);
+       if (rx_adapter == NULL || event_port_id == NULL)
+               return -EINVAL;
+
+       if (rx_adapter->service_inited)
+               *event_port_id = rx_adapter->event_port_id;
+
+       return rx_adapter->service_inited ? 0 : -ESRCH;
+}
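
rte_event_eth_rx_adapter_event_port_get() exposes the event port that a service-mode adapter enqueues on; it only succeeds once the adapter's service has been initialised. A short sketch (what the caller does with the port, here an attribute query, is an assumption):

/* Sketch: query the adapter's event port and one of its attributes. */
uint8_t rxa_port;
uint32_t deq_depth = 0;

if (rte_event_eth_rx_adapter_event_port_get(0 /* adapter id */, &rxa_port) == 0) {
	rte_event_port_attr_get(0 /* event dev id */, rxa_port,
				RTE_EVENT_PORT_ATTR_DEQ_DEPTH, &deq_depth);
	printf("Rx adapter event port %u, dequeue depth %u\n",
	       rxa_port, deq_depth);
}
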
+
 int
 rte_event_eth_rx_adapter_cb_register(uint8_t id,
                                        uint16_t eth_dev_id,
                                        rte_event_eth_rx_adapter_cb_fn cb_fn,
                                        void *cb_arg)
 {
-       struct rte_event_eth_rx_adapter *rx_adapter;
+       struct event_eth_rx_adapter *rx_adapter;
        struct eth_device_info *dev_info;
        uint32_t cap;
        int ret;
@@ -2900,11 +3218,11 @@ rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
                        uint16_t rx_queue_id,
                        struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
+#define TICK2NSEC(_ticks, _freq) (((_ticks) * (1E9)) / (_freq))
        struct rte_eventdev *dev;
-       struct rte_event_eth_rx_adapter *rx_adapter;
+       struct event_eth_rx_adapter *rx_adapter;
        struct eth_device_info *dev_info;
        struct eth_rx_queue_info *queue_info;
-       struct rte_event *qi_ev;
        int ret;
 
        if (rxa_memzone_lookup())
@@ -2935,7 +3253,6 @@ rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
        }
 
        queue_info = &dev_info->rx_queue[rx_queue_id];
-       qi_ev = (struct rte_event *)&queue_info->event;
 
        memset(queue_conf, 0, sizeof(*queue_conf));
        queue_conf->rx_queue_flags = 0;
@@ -2944,7 +3261,18 @@ rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
                        RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
        queue_conf->servicing_weight = queue_info->wt;
 
-       memcpy(&queue_conf->ev, qi_ev, sizeof(*qi_ev));
+       queue_conf->ev.event = queue_info->event;
+
+       queue_conf->vector_sz = queue_info->vector_data.max_vector_count;
+       queue_conf->vector_mp = queue_info->vector_data.vector_pool;
+       /* Convert the vector timeout from timer ticks to nanoseconds */
+       queue_conf->vector_timeout_ns = TICK2NSEC(
+               queue_info->vector_data.vector_timeout_ticks, rte_get_timer_hz());
+
+       if (queue_info->event_buf != NULL)
+               queue_conf->event_buf_size = queue_info->event_buf->events_size;
+       else
+               queue_conf->event_buf_size = 0;
 
        dev = &rte_eventdevs[rx_adapter->eventdev_id];
        if (dev->dev_ops->eth_rx_adapter_queue_conf_get != NULL) {
@@ -2957,3 +3285,294 @@ rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
 
        return 0;
 }
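
A minimal sketch (illustrative only, hypothetical IDs) reading back the new
queue configuration fields, including the vector timeout converted to
nanoseconds and the per-queue event buffer size:

#include <inttypes.h>
#include <stdio.h>
#include <rte_event_eth_rx_adapter.h>

static void
rxa_queue_conf_example(uint8_t rxa_id, uint16_t eth_port, uint16_t rxq)
{
        struct rte_event_eth_rx_adapter_queue_conf conf;

        if (rte_event_eth_rx_adapter_queue_conf_get(rxa_id, eth_port, rxq,
                                                    &conf) != 0)
                return;

        printf("queue %u: weight %u, vector timeout %" PRIu64 " ns, "
               "event buf size %u\n",
               rxq, conf.servicing_weight, conf.vector_timeout_ns,
               conf.event_buf_size);
}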
+
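+/* Add one stats/config field to the telemetry dict; relies on a local
+ * "struct rte_tel_data *d" being in scope at the expansion site.
+ */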
+#define RXA_ADD_DICT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
+
+static int
+handle_rxa_stats(const char *cmd __rte_unused,
+                const char *params,
+                struct rte_tel_data *d)
+{
+       uint8_t rx_adapter_id;
+       struct rte_event_eth_rx_adapter_stats rx_adptr_stats;
+
+       if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+               return -1;
+
+       /* Get Rx adapter ID from parameter string */
+       rx_adapter_id = atoi(params);
+       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
+
+       /* Get Rx adapter stats */
+       if (rte_event_eth_rx_adapter_stats_get(rx_adapter_id,
+                                              &rx_adptr_stats)) {
+               RTE_EDEV_LOG_ERR("Failed to get Rx adapter stats\n");
+               return -1;
+       }
+
+       rte_tel_data_start_dict(d);
+       rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
+       RXA_ADD_DICT(rx_adptr_stats, rx_packets);
+       RXA_ADD_DICT(rx_adptr_stats, rx_poll_count);
+       RXA_ADD_DICT(rx_adptr_stats, rx_dropped);
+       RXA_ADD_DICT(rx_adptr_stats, rx_enq_retry);
+       RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_count);
+       RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_size);
+       RXA_ADD_DICT(rx_adptr_stats, rx_enq_count);
+       RXA_ADD_DICT(rx_adptr_stats, rx_enq_start_ts);
+       RXA_ADD_DICT(rx_adptr_stats, rx_enq_block_cycles);
+       RXA_ADD_DICT(rx_adptr_stats, rx_enq_end_ts);
+       RXA_ADD_DICT(rx_adptr_stats, rx_intr_packets);
+
+       return 0;
+}
+
+static int
+handle_rxa_stats_reset(const char *cmd __rte_unused,
+                      const char *params,
+                      struct rte_tel_data *d __rte_unused)
+{
+       uint8_t rx_adapter_id;
+
+       if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+               return -1;
+
+       /* Get Rx adapter ID from parameter string */
+       rx_adapter_id = atoi(params);
+       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
+
+       /* Reset Rx adapter stats */
+       if (rte_event_eth_rx_adapter_stats_reset(rx_adapter_id)) {
+               RTE_EDEV_LOG_ERR("Failed to reset Rx adapter stats\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+static int
+handle_rxa_get_queue_conf(const char *cmd __rte_unused,
+                         const char *params,
+                         struct rte_tel_data *d)
+{
+       uint8_t rx_adapter_id;
+       uint16_t rx_queue_id;
+       int eth_dev_id, ret = -1;
+       char *token, *l_params;
+       struct rte_event_eth_rx_adapter_queue_conf queue_conf;
+
+       if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+               return -1;
+
+       /* Get Rx adapter ID from parameter string */
+       l_params = strdup(params);
+       if (l_params == NULL)
+               return -ENOMEM;
+       token = strtok(l_params, ",");
+       RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+       rx_adapter_id = strtoul(token, NULL, 10);
+       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(rx_adapter_id, -EINVAL);
+
+       token = strtok(NULL, ",");
+       RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+
+       /* Get device ID from parameter string */
+       eth_dev_id = strtoul(token, NULL, 10);
+       RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
+
+       token = strtok(NULL, ",");
+       RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+
+       /* Get Rx queue ID from parameter string */
+       rx_queue_id = strtoul(token, NULL, 10);
+       if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+               RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+               ret = -EINVAL;
+               goto error;
+       }
+
+       token = strtok(NULL, "\0");
+       if (token != NULL)
+               RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+                                " telemetry command, ignoring");
+       /* Parsing parameter finished */
+       free(l_params);
+
+       if (rte_event_eth_rx_adapter_queue_conf_get(rx_adapter_id, eth_dev_id,
+                                                   rx_queue_id, &queue_conf)) {
+               RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue config");
+               return -1;
+       }
+
+       rte_tel_data_start_dict(d);
+       rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
+       rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id);
+       rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id);
+       RXA_ADD_DICT(queue_conf, rx_queue_flags);
+       RXA_ADD_DICT(queue_conf, servicing_weight);
+       RXA_ADD_DICT(queue_conf.ev, queue_id);
+       RXA_ADD_DICT(queue_conf.ev, sched_type);
+       RXA_ADD_DICT(queue_conf.ev, priority);
+       RXA_ADD_DICT(queue_conf.ev, flow_id);
+
+       return 0;
+
+error:
+       free(l_params);
+       return ret;
+}
+
+static int
+handle_rxa_get_queue_stats(const char *cmd __rte_unused,
+                          const char *params,
+                          struct rte_tel_data *d)
+{
+       uint8_t rx_adapter_id;
+       uint16_t rx_queue_id;
+       int eth_dev_id, ret = -1;
+       char *token, *l_params;
+       struct rte_event_eth_rx_adapter_queue_stats q_stats;
+
+       if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+               return -1;
+
+       /* Get Rx adapter ID from parameter string */
+       l_params = strdup(params);
+       if (l_params == NULL)
+               return -ENOMEM;
+       token = strtok(l_params, ",");
+       RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+       rx_adapter_id = strtoul(token, NULL, 10);
+       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(rx_adapter_id, -EINVAL);
+
+       token = strtok(NULL, ",");
+       RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+
+       /* Get device ID from parameter string */
+       eth_dev_id = strtoul(token, NULL, 10);
+       RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
+
+       token = strtok(NULL, ",");
+       RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+
+       /* Get Rx queue ID from parameter string */
+       rx_queue_id = strtoul(token, NULL, 10);
+       if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+               RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+               ret = -EINVAL;
+               goto error;
+       }
+
+       token = strtok(NULL, "\0");
+       if (token != NULL)
+               RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+                                " telemetry command, ignoring");
+       /* Parsing parameter finished */
+       free(l_params);
+
+       if (rte_event_eth_rx_adapter_queue_stats_get(rx_adapter_id, eth_dev_id,
+                                                   rx_queue_id, &q_stats)) {
+               RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue stats");
+               return -1;
+       }
+
+       rte_tel_data_start_dict(d);
+       rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
+       rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id);
+       rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id);
+       RXA_ADD_DICT(q_stats, rx_event_buf_count);
+       RXA_ADD_DICT(q_stats, rx_event_buf_size);
+       RXA_ADD_DICT(q_stats, rx_poll_count);
+       RXA_ADD_DICT(q_stats, rx_packets);
+       RXA_ADD_DICT(q_stats, rx_dropped);
+
+       return 0;
+
+error:
+       free(l_params);
+       return ret;
+}
+
+static int
+handle_rxa_queue_stats_reset(const char *cmd __rte_unused,
+                            const char *params,
+                            struct rte_tel_data *d __rte_unused)
+{
+       uint8_t rx_adapter_id;
+       uint16_t rx_queue_id;
+       int eth_dev_id, ret = -1;
+       char *token, *l_params;
+
+       if (params == NULL || strlen(params) == 0 || !isdigit(*params))
+               return -1;
+
+       /* Get Rx adapter ID from parameter string */
+       l_params = strdup(params);
+       if (l_params == NULL)
+               return -ENOMEM;
+       token = strtok(l_params, ",");
+       RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+       rx_adapter_id = strtoul(token, NULL, 10);
+       RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(rx_adapter_id, -EINVAL);
+
+       token = strtok(NULL, ",");
+       RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+
+       /* Get device ID from parameter string */
+       eth_dev_id = strtoul(token, NULL, 10);
+       RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
+
+       token = strtok(NULL, ",");
+       RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
+
+       /* Get Rx queue ID from parameter string */
+       rx_queue_id = strtoul(token, NULL, 10);
+       if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+               RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
+               ret = -EINVAL;
+               goto error;
+       }
+
+       token = strtok(NULL, "\0");
+       if (token != NULL)
+               RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
+                                " telemetry command, ignoring");
+       /* Parsing parameter finished */
+       free(l_params);
+
+       if (rte_event_eth_rx_adapter_queue_stats_reset(rx_adapter_id,
+                                                      eth_dev_id,
+                                                      rx_queue_id)) {
+               RTE_EDEV_LOG_ERR("Failed to reset Rx adapter queue stats");
+               return -1;
+       }
+
+       return 0;
+
+error:
+       free(l_params);
+       return ret;
+}
+
+RTE_INIT(rxa_init_telemetry)
+{
+       rte_telemetry_register_cmd("/eventdev/rxa_stats",
+               handle_rxa_stats,
+               "Returns Rx adapter stats. Parameter: rxa_id");
+
+       rte_telemetry_register_cmd("/eventdev/rxa_stats_reset",
+               handle_rxa_stats_reset,
+               "Reset Rx adapter stats. Parameter: rxa_id");
+
+       rte_telemetry_register_cmd("/eventdev/rxa_queue_conf",
+               handle_rxa_get_queue_conf,
+               "Returns Rx queue config. Parameter: rxa_id, dev_id, queue_id");
+
+       rte_telemetry_register_cmd("/eventdev/rxa_queue_stats",
+               handle_rxa_get_queue_stats,
+               "Returns Rx queue stats. Parameter: rxa_id, dev_id, queue_id");
+
+       rte_telemetry_register_cmd("/eventdev/rxa_queue_stats_reset",
+               handle_rxa_queue_stats_reset,
+               "Reset Rx queue stats. Parameter: rxa_id, dev_id, queue_id");
+}
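
Once registered, these endpoints can be queried over the telemetry socket,
e.g. with usertools/dpdk-telemetry.py (IDs below are hypothetical; the
parameters are the adapter, ethdev and Rx queue IDs, comma separated, as
parsed by the handlers above):

  --> /eventdev/rxa_stats,0
  --> /eventdev/rxa_queue_stats,0,0,0
  --> /eventdev/rxa_queue_stats_reset,0,0,0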