diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index c2d824275d..ae1e260c08 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -22,7 +22,7 @@ #include "rte_eventdev.h" #include "eventdev_pmd.h" -#include "rte_eventdev_trace.h" +#include "eventdev_trace.h" #include "rte_event_eth_rx_adapter.h" #define BATCH_SIZE 32 @@ -82,7 +82,7 @@ struct eth_rx_vector_data { TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data); /* Instance per adapter */ -struct rte_eth_event_enqueue_buffer { +struct eth_event_enqueue_buffer { /* Count of events in this buffer */ uint16_t count; /* Array of events in this buffer */ @@ -98,7 +98,7 @@ struct rte_eth_event_enqueue_buffer { uint16_t last_mask; }; -struct rte_event_eth_rx_adapter { +struct event_eth_rx_adapter { /* RSS key */ uint8_t rss_key_be[RSS_KEY_SIZE]; /* Event device identifier */ @@ -124,7 +124,7 @@ struct rte_event_eth_rx_adapter { /* Next entry in wrr[] to begin polling */ uint32_t wrr_pos; /* Event burst buffer */ - struct rte_eth_event_enqueue_buffer event_enqueue_buffer; + struct eth_event_enqueue_buffer event_enqueue_buffer; /* Vector enable flag */ uint8_t ena_vector; /* Timestamp of previous vector expiry list traversal */ @@ -244,10 +244,14 @@ struct eth_rx_queue_info { uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */ uint64_t event; struct eth_rx_vector_data vector_data; - struct rte_eth_event_enqueue_buffer *event_buf; + struct eth_event_enqueue_buffer *event_buf; + /* use adapter stats struct for queue level stats, + * as same stats need to be updated for adapter and queue + */ + struct rte_event_eth_rx_adapter_stats *stats; }; -static struct rte_event_eth_rx_adapter **event_eth_rx_adapter; +static struct event_eth_rx_adapter **event_eth_rx_adapter; /* Enable dynamic timestamp field in mbuf */ static uint64_t event_eth_rx_timestamp_dynflag; @@ -266,16 +270,20 @@ rxa_validate_id(uint8_t id) return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE; } -static inline struct rte_eth_event_enqueue_buffer * -rxa_event_buf_get(struct rte_event_eth_rx_adapter *rx_adapter, - uint16_t eth_dev_id, uint16_t rx_queue_id) +static inline struct eth_event_enqueue_buffer * +rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, + uint16_t rx_queue_id, + struct rte_event_eth_rx_adapter_stats **stats) { if (rx_adapter->use_queue_event_buf) { struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id]; + *stats = dev_info->rx_queue[rx_queue_id].stats; return dev_info->rx_queue[rx_queue_id].event_buf; - } else + } else { + *stats = &rx_adapter->stats; return &rx_adapter->event_enqueue_buffer; + } } #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \ @@ -286,7 +294,7 @@ rxa_event_buf_get(struct rte_event_eth_rx_adapter *rx_adapter, } while (0) static inline int -rxa_sw_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter) +rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter) { return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr; } @@ -304,10 +312,9 @@ static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b) * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling */
static int -rxa_wrr_next(struct rte_event_eth_rx_adapter *rx_adapter, - unsigned int n, int *cw, - struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt, - uint16_t gcd, int prev) +rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw, + struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt, + uint16_t gcd, int prev) { int i = prev; uint16_t w; @@ -412,10 +419,9 @@ rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add) /* Calculate nb_rx_intr after deleting interrupt mode rx queues */ static void -rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter, - struct eth_device_info *dev_info, - int rx_queue_id, - uint32_t *nb_rx_intr) +rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter, + struct eth_device_info *dev_info, int rx_queue_id, + uint32_t *nb_rx_intr) { uint32_t intr_diff; @@ -431,12 +437,10 @@ rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter, * interrupt queues could currently be poll mode Rx queues */ static void -rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter, - struct eth_device_info *dev_info, - int rx_queue_id, - uint32_t *nb_rx_poll, - uint32_t *nb_rx_intr, - uint32_t *nb_wrr) +rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter, + struct eth_device_info *dev_info, int rx_queue_id, + uint32_t *nb_rx_poll, uint32_t *nb_rx_intr, + uint32_t *nb_wrr) { uint32_t intr_diff; uint32_t poll_diff; @@ -463,11 +467,9 @@ rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter, * after deleting poll mode rx queues */ static void -rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter, - struct eth_device_info *dev_info, - int rx_queue_id, - uint32_t *nb_rx_poll, - uint32_t *nb_wrr) +rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter, + struct eth_device_info *dev_info, int rx_queue_id, + uint32_t *nb_rx_poll, uint32_t *nb_wrr) { uint32_t poll_diff; uint32_t wrr_len_diff; @@ -488,13 +490,10 @@ rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter, /* Calculate nb_rx_* after adding poll mode rx queues */ static void -rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter, - struct eth_device_info *dev_info, - int rx_queue_id, - uint16_t wt, - uint32_t *nb_rx_poll, - uint32_t *nb_rx_intr, - uint32_t *nb_wrr) +rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter, + struct eth_device_info *dev_info, int rx_queue_id, + uint16_t wt, uint32_t *nb_rx_poll, + uint32_t *nb_rx_intr, uint32_t *nb_wrr) { uint32_t intr_diff; uint32_t poll_diff; @@ -521,13 +520,10 @@ rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter, /* Calculate nb_rx_* after adding rx_queue_id */ static void -rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter, - struct eth_device_info *dev_info, - int rx_queue_id, - uint16_t wt, - uint32_t *nb_rx_poll, - uint32_t *nb_rx_intr, - uint32_t *nb_wrr) +rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter, + struct eth_device_info *dev_info, int rx_queue_id, + uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr, + uint32_t *nb_wrr) { if (wt != 0) rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id, @@ -539,12 +535,10 @@ rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter, /* Calculate nb_rx_* after deleting rx_queue_id */ static void -rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter, - struct eth_device_info *dev_info, - int rx_queue_id, - uint32_t *nb_rx_poll, - uint32_t *nb_rx_intr, - 
uint32_t *nb_wrr) +rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter, + struct eth_device_info *dev_info, int rx_queue_id, + uint32_t *nb_rx_poll, uint32_t *nb_rx_intr, + uint32_t *nb_wrr) { rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll, nb_wrr); @@ -556,8 +550,7 @@ rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter, * Allocate the rx_poll array */ static struct eth_rx_poll_entry * -rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter, - uint32_t num_rx_polled) +rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled) { size_t len; @@ -573,7 +566,7 @@ rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter, * Allocate the WRR array */ static uint32_t * -rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr) +rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr) { size_t len; @@ -586,11 +579,9 @@ rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr) } static int -rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter, - uint32_t nb_poll, - uint32_t nb_wrr, - struct eth_rx_poll_entry **rx_poll, - uint32_t **wrr_sched) +rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll, + uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll, + uint32_t **wrr_sched) { if (nb_poll == 0) { @@ -615,9 +606,8 @@ rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter, /* Precalculate WRR polling sequence for all queues in rx_adapter */ static void -rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter, - struct eth_rx_poll_entry *rx_poll, - uint32_t *rx_wrr) +rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter, + struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr) { uint16_t d; uint16_t q; @@ -744,13 +734,13 @@ rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be) } static inline int -rxa_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter) +rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter) { return !!rx_adapter->enq_block_count; } static inline void -rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter) +rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter) { if (rx_adapter->rx_enq_block_start_ts) return; @@ -763,8 +753,8 @@ rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter) } static inline void -rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter, - struct rte_event_eth_rx_adapter_stats *stats) +rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter, + struct rte_event_eth_rx_adapter_stats *stats) { if (unlikely(!stats->rx_enq_start_ts)) stats->rx_enq_start_ts = rte_get_tsc_cycles(); @@ -783,23 +773,29 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter, /* Enqueue buffered events to event device */ static inline uint16_t -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter, - struct rte_eth_event_enqueue_buffer *buf) +rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter, + struct eth_event_enqueue_buffer *buf, + struct rte_event_eth_rx_adapter_stats *stats) { - struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; - uint16_t count = buf->last ? 
buf->last - buf->head : buf->count; + uint16_t count = buf->count; + uint16_t n = 0; if (!count) return 0; - uint16_t n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id, - rx_adapter->event_port_id, - &buf->events[buf->head], - count); - if (n != count) - stats->rx_enq_retry++; + if (buf->last) + count = buf->last - buf->head; + + if (count) { + n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id, + rx_adapter->event_port_id, + &buf->events[buf->head], + count); + if (n != count) + stats->rx_enq_retry++; - buf->head += n; + buf->head += n; + } if (buf->last && n == count) { uint16_t n1; @@ -828,7 +824,7 @@ rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter, } static inline void -rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter, +rxa_init_vector(struct event_eth_rx_adapter *rx_adapter, struct eth_rx_vector_data *vec) { vec->vector_ev->nb_elem = 0; @@ -839,9 +835,9 @@ rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter, } static inline uint16_t -rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter, +rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter, struct eth_rx_queue_info *queue_info, - struct rte_eth_event_enqueue_buffer *buf, + struct eth_event_enqueue_buffer *buf, struct rte_mbuf **mbufs, uint16_t num) { struct rte_event *ev = &buf->events[buf->count]; @@ -899,12 +895,10 @@ rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter, } static inline void -rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, - uint16_t eth_dev_id, - uint16_t rx_queue_id, - struct rte_mbuf **mbufs, - uint16_t num, - struct rte_eth_event_enqueue_buffer *buf) +rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, + uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num, + struct eth_event_enqueue_buffer *buf, + struct rte_event_eth_rx_adapter_stats *stats) { uint32_t i; struct eth_device_info *dev_info = @@ -926,14 +920,14 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, ts = m->ol_flags & event_eth_rx_timestamp_dynflag ? 
0 : rte_get_tsc_cycles(); - /* 0xffff ffff ffff ffff if PKT_RX_TIMESTAMP is set, + /* 0xffff ffff ffff ffff if RTE_MBUF_F_RX_TIMESTAMP is set, * otherwise 0 */ ts_mask = (uint64_t)(!(m->ol_flags & event_eth_rx_timestamp_dynflag)) - 1ULL; - /* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */ - rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1); + /* 0xffff ffff if RTE_MBUF_F_RX_RSS_HASH is set, otherwise 0 */ + rss_mask = ~(((m->ol_flags & RTE_MBUF_F_RX_RSS_HASH) != 0) - 1); do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask; for (i = 0; i < num; i++) { struct rte_event *ev; @@ -975,7 +969,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, else num = nb_cb; if (dropped) - rx_adapter->stats.rx_dropped += dropped; + stats->rx_dropped += dropped; } buf->count += num; @@ -983,7 +977,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, } static inline bool -rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf) +rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf) { uint32_t nb_req = buf->tail + BATCH_SIZE; @@ -1004,19 +998,15 @@ /* Enqueue packets from <port, q> to event buffer */ static inline uint32_t -rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, - uint16_t port_id, - uint16_t queue_id, - uint32_t rx_count, - uint32_t max_rx, - int *rxq_empty, - struct rte_eth_event_enqueue_buffer *buf) +rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id, + uint16_t queue_id, uint32_t rx_count, uint32_t max_rx, + int *rxq_empty, struct eth_event_enqueue_buffer *buf, + struct rte_event_eth_rx_adapter_stats *stats) { struct rte_mbuf *mbufs[BATCH_SIZE]; - struct rte_event_eth_rx_adapter_stats *stats = - &rx_adapter->stats; uint16_t n; uint32_t nb_rx = 0; + uint32_t nb_flushed = 0; if (rxq_empty) *rxq_empty = 0; @@ -1025,7 +1015,8 @@ */ while (rxa_pkt_buf_available(buf)) { if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter, buf); + nb_flushed += + rxa_flush_event_buffer(rx_adapter, buf, stats); stats->rx_poll_count++; n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE); @@ -1034,21 +1025,26 @@ *rxq_empty = 1; break; } - rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf); + rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf, + stats); nb_rx += n; if (rx_count + nb_rx > max_rx) break; } if (buf->count > 0) - rxa_flush_event_buffer(rx_adapter, buf); + nb_flushed += rxa_flush_event_buffer(rx_adapter, buf, stats); + + stats->rx_packets += nb_rx; + if (nb_flushed == 0) + rte_event_maintain(rx_adapter->eventdev_id, + rx_adapter->event_port_id, 0); return nb_rx; } static inline void -rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter, - void *data) +rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data) { uint16_t port_id; uint16_t queue; @@ -1088,8 +1084,8 @@ rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter, } static int -rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter, - uint32_t num_intr_vec) +rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter, + uint32_t num_intr_vec) { if (rx_adapter->num_intr_vec + num_intr_vec > RTE_EVENT_ETH_INTR_RING_SIZE) { @@ -1104,9 +1100,9 @@ rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter, /* Delete entries for (dev, queue) from the interrupt ring */ static void
-rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter, - struct eth_device_info *dev_info, - uint16_t rx_queue_id) +rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter, + struct eth_device_info *dev_info, + uint16_t rx_queue_id) { int i, n; union queue_data qd; @@ -1139,7 +1135,7 @@ rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter, static void * rxa_intr_thread(void *arg) { - struct rte_event_eth_rx_adapter *rx_adapter = arg; + struct event_eth_rx_adapter *rx_adapter = arg; struct rte_epoll_event *epoll_events = rx_adapter->epoll_events; int n, i; @@ -1161,28 +1157,30 @@ rxa_intr_thread(void *arg) /* Dequeue from interrupt ring and enqueue received * mbufs to eventdev */ -static inline uint32_t -rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) +static inline void +rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter) { uint32_t n; uint32_t nb_rx = 0; int rxq_empty; - struct rte_eth_event_enqueue_buffer *buf; + struct eth_event_enqueue_buffer *buf; + struct rte_event_eth_rx_adapter_stats *stats; rte_spinlock_t *ring_lock; uint8_t max_done = 0; if (rx_adapter->num_rx_intr == 0) - return 0; + return; if (rte_ring_count(rx_adapter->intr_ring) == 0 && !rx_adapter->qd_valid) - return 0; + return; buf = &rx_adapter->event_enqueue_buffer; + stats = &rx_adapter->stats; ring_lock = &rx_adapter->intr_ring_lock; if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter, buf); + rxa_flush_event_buffer(rx_adapter, buf, stats); while (rxa_pkt_buf_available(buf)) { struct eth_device_info *dev_info; @@ -1234,7 +1232,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) continue; n = rxa_eth_rx(rx_adapter, port, i, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty, buf); + &rxq_empty, buf, stats); nb_rx += n; enq_buffer_full = !rxq_empty && n == 0; @@ -1255,7 +1253,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) } else { n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty, buf); + &rxq_empty, buf, stats); rx_adapter->qd_valid = !rxq_empty; nb_rx += n; if (nb_rx > rx_adapter->max_nb_rx) @@ -1265,7 +1263,6 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) done: rx_adapter->stats.rx_intr_packets += nb_rx; - return nb_rx; } /* @@ -1281,12 +1278,13 @@ done: * the hypervisor's switching layer where adjustments can be made to deal with * it. */ -static inline uint32_t -rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) +static inline void +rxa_poll(struct event_eth_rx_adapter *rx_adapter) { uint32_t num_queue; uint32_t nb_rx = 0; - struct rte_eth_event_enqueue_buffer *buf = NULL; + struct eth_event_enqueue_buffer *buf = NULL; + struct rte_event_eth_rx_adapter_stats *stats = NULL; uint32_t wrr_pos; uint32_t max_nb_rx; @@ -1299,24 +1297,24 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid; uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id; - buf = rxa_event_buf_get(rx_adapter, d, qid); + buf = rxa_event_buf_get(rx_adapter, d, qid, &stats); /* Don't do a batch dequeue from the rx queue if there isn't * enough space in the enqueue buffer. 
*/ if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter, buf); + rxa_flush_event_buffer(rx_adapter, buf, stats); if (!rxa_pkt_buf_available(buf)) { if (rx_adapter->use_queue_event_buf) goto poll_next_entry; else { rx_adapter->wrr_pos = wrr_pos; - return nb_rx; + return; } } nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx, - NULL, buf); + NULL, buf, stats); if (nb_rx > max_nb_rx) { rx_adapter->wrr_pos = (wrr_pos + 1) % rx_adapter->wrr_len; @@ -1327,20 +1325,20 @@ poll_next_entry: if (++wrr_pos == rx_adapter->wrr_len) wrr_pos = 0; } - return nb_rx; } static void rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg) { - struct rte_event_eth_rx_adapter *rx_adapter = arg; - struct rte_eth_event_enqueue_buffer *buf = NULL; + struct event_eth_rx_adapter *rx_adapter = arg; + struct eth_event_enqueue_buffer *buf = NULL; + struct rte_event_eth_rx_adapter_stats *stats = NULL; struct rte_event *ev; - buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue); + buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue, &stats); if (buf->count) - rxa_flush_event_buffer(rx_adapter, buf); + rxa_flush_event_buffer(rx_adapter, buf, stats); if (vec->vector_ev->nb_elem == 0) return; @@ -1358,8 +1356,7 @@ rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg) static int rxa_service_func(void *args) { - struct rte_event_eth_rx_adapter *rx_adapter = args; - struct rte_event_eth_rx_adapter_stats *stats; + struct event_eth_rx_adapter *rx_adapter = args; if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0) return 0; @@ -1386,10 +1383,11 @@ rxa_service_func(void *args) } } - stats = &rx_adapter->stats; - stats->rx_packets += rxa_intr_ring_dequeue(rx_adapter); - stats->rx_packets += rxa_poll(rx_adapter); + rxa_intr_ring_dequeue(rx_adapter); + rxa_poll(rx_adapter); + rte_spinlock_unlock(&rx_adapter->rx_lock); + return 0; } @@ -1434,7 +1432,7 @@ rxa_memzone_lookup(void) return 0; } -static inline struct rte_event_eth_rx_adapter * +static inline struct event_eth_rx_adapter * rxa_id_to_adapter(uint8_t id) { return event_eth_rx_adapter ? 
@@ -1451,7 +1449,7 @@ rxa_default_conf_cb(uint8_t id, uint8_t dev_id, int started; uint8_t port_id; struct rte_event_port_conf *port_conf = arg; - struct rte_event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id); + struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id); dev = &rte_eventdevs[rx_adapter->eventdev_id]; dev_conf = dev->data->dev_conf; @@ -1500,7 +1498,7 @@ rxa_epoll_create1(void) } static int -rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter) +rxa_init_epd(struct event_eth_rx_adapter *rx_adapter) { if (rx_adapter->epd != INIT_FD) return 0; @@ -1517,7 +1515,7 @@ rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter) } static int -rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter) +rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter) { int err; char thread_name[RTE_MAX_THREAD_NAME_LEN]; @@ -1561,7 +1559,7 @@ error: } static int -rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter) +rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter) { int err; @@ -1582,7 +1580,7 @@ rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter) } static int -rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter) +rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter) { int ret; @@ -1600,9 +1598,8 @@ rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter) } static int -rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter, - struct eth_device_info *dev_info, - uint16_t rx_queue_id) +rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter, + struct eth_device_info *dev_info, uint16_t rx_queue_id) { int err; uint16_t eth_dev_id = dev_info->dev->data->port_id; @@ -1630,9 +1627,8 @@ rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter, } static int -rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter, - struct eth_device_info *dev_info, - int rx_queue_id) +rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter, + struct eth_device_info *dev_info, int rx_queue_id) { int err; int i; @@ -1689,9 +1685,8 @@ rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter, } static int -rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter, - struct eth_device_info *dev_info, - uint16_t rx_queue_id) +rxa_config_intr(struct event_eth_rx_adapter *rx_adapter, + struct eth_device_info *dev_info, uint16_t rx_queue_id) { int err, err1; uint16_t eth_dev_id = dev_info->dev->data->port_id; @@ -1779,9 +1774,8 @@ err_free_queue: } static int -rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter, - struct eth_device_info *dev_info, - int rx_queue_id) +rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter, + struct eth_device_info *dev_info, int rx_queue_id) { int i, j, err; @@ -1829,9 +1823,8 @@ rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter, return err; } - static int -rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id) +rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id) { int ret; struct rte_service_spec service; @@ -1874,10 +1867,9 @@ err_done: } static void -rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter, - struct eth_device_info *dev_info, - int32_t rx_queue_id, - uint8_t add) +rxa_update_queue(struct event_eth_rx_adapter *rx_adapter, + struct eth_device_info *dev_info, int32_t rx_queue_id, + uint8_t add) { struct eth_rx_queue_info *queue_info; int enabled; @@ -1927,9 +1919,8 @@ rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count, } static void 
-rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter, - struct eth_device_info *dev_info, - int32_t rx_queue_id) +rxa_sw_del(struct event_eth_rx_adapter *rx_adapter, + struct eth_device_info *dev_info, int32_t rx_queue_id) { struct eth_rx_vector_data *vec; int pollq; @@ -1968,19 +1959,22 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter, dev_info->nb_rx_intr -= intrq; dev_info->nb_shared_intr -= intrq && sintrq; if (rx_adapter->use_queue_event_buf) { - struct rte_eth_event_enqueue_buffer *event_buf = + struct eth_event_enqueue_buffer *event_buf = dev_info->rx_queue[rx_queue_id].event_buf; + struct rte_event_eth_rx_adapter_stats *stats = + dev_info->rx_queue[rx_queue_id].stats; rte_free(event_buf->events); rte_free(event_buf); + rte_free(stats); dev_info->rx_queue[rx_queue_id].event_buf = NULL; + dev_info->rx_queue[rx_queue_id].stats = NULL; } } static int -rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, - struct eth_device_info *dev_info, - int32_t rx_queue_id, - const struct rte_event_eth_rx_adapter_queue_conf *conf) +rxa_add_queue(struct event_eth_rx_adapter *rx_adapter, + struct eth_device_info *dev_info, int32_t rx_queue_id, + const struct rte_event_eth_rx_adapter_queue_conf *conf) { struct eth_rx_queue_info *queue_info; const struct rte_event *ev = &conf->ev; @@ -1988,7 +1982,8 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, int intrq; int sintrq; struct rte_event *qi_ev; - struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL; + struct eth_event_enqueue_buffer *new_rx_buf = NULL; + struct rte_event_eth_rx_adapter_stats *stats = NULL; uint16_t eth_dev_id = dev_info->dev->data->port_id; int ret; @@ -2095,13 +2090,28 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, queue_info->event_buf = new_rx_buf; + /* Allocate storage for adapter queue stats */ + stats = rte_zmalloc_socket("rx_queue_stats", + sizeof(*stats), 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (stats == NULL) { + rte_free(new_rx_buf->events); + rte_free(new_rx_buf); + RTE_EDEV_LOG_ERR("Failed to allocate stats storage for" + " dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + queue_info->stats = stats; + return 0; } -static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, - uint16_t eth_dev_id, - int rx_queue_id, - const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) +static int +rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, + int rx_queue_id, + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) { struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id]; struct rte_event_eth_rx_adapter_queue_conf temp_conf; @@ -2242,7 +2252,7 @@ err_free_rxqueue: static int rxa_ctrl(uint8_t id, int start) { - struct rte_event_eth_rx_adapter *rx_adapter; + struct event_eth_rx_adapter *rx_adapter; struct rte_eventdev *dev; struct eth_device_info *dev_info; uint32_t i; @@ -2290,8 +2300,8 @@ rxa_create(uint8_t id, uint8_t dev_id, rte_event_eth_rx_adapter_conf_cb conf_cb, void *conf_arg) { - struct rte_event_eth_rx_adapter *rx_adapter; - struct rte_eth_event_enqueue_buffer *buf; + struct event_eth_rx_adapter *rx_adapter; + struct eth_event_enqueue_buffer *buf; struct rte_event *events; int ret; int socket_id; @@ -2488,7 +2498,7 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, int rte_event_eth_rx_adapter_free(uint8_t id) { - struct rte_event_eth_rx_adapter *rx_adapter; + struct event_eth_rx_adapter *rx_adapter; RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); @@ -2522,7 
+2532,7 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id, { int ret; uint32_t cap; - struct rte_event_eth_rx_adapter *rx_adapter; + struct event_eth_rx_adapter *rx_adapter; struct rte_eventdev *dev; struct eth_device_info *dev_info; struct rte_event_eth_rx_adapter_vector_limits limits; @@ -2682,7 +2692,7 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id, { int ret = 0; struct rte_eventdev *dev; - struct rte_event_eth_rx_adapter *rx_adapter; + struct event_eth_rx_adapter *rx_adapter; struct eth_device_info *dev_info; uint32_t cap; uint32_t nb_rx_poll = 0; @@ -2773,6 +2783,11 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id, rx_adapter->eth_rx_poll = rx_poll; rx_adapter->wrr_sched = rx_wrr; rx_adapter->wrr_len = nb_wrr; + /* + * reset next poll start position (wrr_pos) to avoid buffer + * overrun when wrr_len is reduced in case of queue delete + */ + rx_adapter->wrr_pos = 0; rx_adapter->num_intr_vec += num_intr_vec; if (dev_info->nb_dev_queues == 0) { @@ -2848,17 +2863,28 @@ rte_event_eth_rx_adapter_stop(uint8_t id) return rxa_ctrl(id, 0); } +static inline void +rxa_queue_stats_reset(struct eth_rx_queue_info *queue_info) +{ + struct rte_event_eth_rx_adapter_stats *q_stats; + + q_stats = queue_info->stats; + memset(q_stats, 0, sizeof(*q_stats)); +} + int rte_event_eth_rx_adapter_stats_get(uint8_t id, struct rte_event_eth_rx_adapter_stats *stats) { - struct rte_event_eth_rx_adapter *rx_adapter; - struct rte_eth_event_enqueue_buffer *buf; + struct event_eth_rx_adapter *rx_adapter; + struct eth_event_enqueue_buffer *buf; struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 }; struct rte_event_eth_rx_adapter_stats dev_stats; struct rte_eventdev *dev; struct eth_device_info *dev_info; - uint32_t i; + struct eth_rx_queue_info *queue_info; + struct rte_event_eth_rx_adapter_stats *q_stats; + uint32_t i, j; int ret; if (rxa_memzone_lookup()) @@ -2872,8 +2898,32 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id, dev = &rte_eventdevs[rx_adapter->eventdev_id]; memset(stats, 0, sizeof(*stats)); + + if (rx_adapter->service_inited) + *stats = rx_adapter->stats; + RTE_ETH_FOREACH_DEV(i) { dev_info = &rx_adapter->eth_devices[i]; + + if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) { + + for (j = 0; j < dev_info->dev->data->nb_rx_queues; + j++) { + queue_info = &dev_info->rx_queue[j]; + if (!queue_info->queue_enabled) + continue; + q_stats = queue_info->stats; + + stats->rx_packets += q_stats->rx_packets; + stats->rx_poll_count += q_stats->rx_poll_count; + stats->rx_enq_count += q_stats->rx_enq_count; + stats->rx_enq_retry += q_stats->rx_enq_retry; + stats->rx_dropped += q_stats->rx_dropped; + stats->rx_enq_block_cycles += + q_stats->rx_enq_block_cycles; + } + } + if (dev_info->internal_event_port == 0 || dev->dev_ops->eth_rx_adapter_stats_get == NULL) continue; @@ -2886,19 +2936,71 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id, dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count; } - if (rx_adapter->service_inited) - *stats = rx_adapter->stats; - + buf = &rx_adapter->event_enqueue_buffer; stats->rx_packets += dev_stats_sum.rx_packets; stats->rx_enq_count += dev_stats_sum.rx_enq_count; + stats->rx_event_buf_count = buf->count; + stats->rx_event_buf_size = buf->events_size; - if (!rx_adapter->use_queue_event_buf) { - buf = &rx_adapter->event_enqueue_buffer; - stats->rx_event_buf_count = buf->count; - stats->rx_event_buf_size = buf->events_size; - } else { - stats->rx_event_buf_count = 0; - stats->rx_event_buf_size = 0; + return 0; +} + +int 
+rte_event_eth_rx_adapter_queue_stats_get(uint8_t id, + uint16_t eth_dev_id, + uint16_t rx_queue_id, + struct rte_event_eth_rx_adapter_queue_stats *stats) +{ + struct event_eth_rx_adapter *rx_adapter; + struct eth_device_info *dev_info; + struct eth_rx_queue_info *queue_info; + struct eth_event_enqueue_buffer *event_buf; + struct rte_event_eth_rx_adapter_stats *q_stats; + struct rte_eventdev *dev; + + if (rxa_memzone_lookup()) + return -ENOMEM; + + RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL); + + rx_adapter = rxa_id_to_adapter(id); + + if (rx_adapter == NULL || stats == NULL) + return -EINVAL; + + if (!rx_adapter->use_queue_event_buf) + return -EINVAL; + + if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) { + RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id); + return -EINVAL; + } + + dev_info = &rx_adapter->eth_devices[eth_dev_id]; + if (dev_info->rx_queue == NULL || + !dev_info->rx_queue[rx_queue_id].queue_enabled) { + RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id); + return -EINVAL; + } + + if (dev_info->internal_event_port == 0) { + queue_info = &dev_info->rx_queue[rx_queue_id]; + event_buf = queue_info->event_buf; + q_stats = queue_info->stats; + + stats->rx_event_buf_count = event_buf->count; + stats->rx_event_buf_size = event_buf->events_size; + stats->rx_packets = q_stats->rx_packets; + stats->rx_poll_count = q_stats->rx_poll_count; + stats->rx_dropped = q_stats->rx_dropped; + } + + dev = &rte_eventdevs[rx_adapter->eventdev_id]; + if (dev->dev_ops->eth_rx_adapter_queue_stats_get != NULL) { + return (*dev->dev_ops->eth_rx_adapter_queue_stats_get)(dev, + &rte_eth_devices[eth_dev_id], + rx_queue_id, stats); } return 0; @@ -2907,10 +3009,11 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id, int rte_event_eth_rx_adapter_stats_reset(uint8_t id) { - struct rte_event_eth_rx_adapter *rx_adapter; + struct event_eth_rx_adapter *rx_adapter; struct rte_eventdev *dev; struct eth_device_info *dev_info; - uint32_t i; + struct eth_rx_queue_info *queue_info; + uint32_t i, j; if (rxa_memzone_lookup()) return -ENOMEM; @@ -2922,8 +3025,21 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id) return -EINVAL; dev = &rte_eventdevs[rx_adapter->eventdev_id]; + RTE_ETH_FOREACH_DEV(i) { dev_info = &rx_adapter->eth_devices[i]; + + if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) { + + for (j = 0; j < dev_info->dev->data->nb_rx_queues; + j++) { + queue_info = &dev_info->rx_queue[j]; + if (!queue_info->queue_enabled) + continue; + rxa_queue_stats_reset(queue_info); + } + } + if (dev_info->internal_event_port == 0 || dev->dev_ops->eth_rx_adapter_stats_reset == NULL) continue; @@ -2932,13 +3048,65 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id) } memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats)); + + return 0; +} + +int +rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id, + uint16_t eth_dev_id, + uint16_t rx_queue_id) +{ + struct event_eth_rx_adapter *rx_adapter; + struct eth_device_info *dev_info; + struct eth_rx_queue_info *queue_info; + struct rte_eventdev *dev; + + if (rxa_memzone_lookup()) + return -ENOMEM; + + RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL); + + rx_adapter = rxa_id_to_adapter(id); + if (rx_adapter == NULL) + return -EINVAL; + + if (!rx_adapter->use_queue_event_buf) + return -EINVAL; + + if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) { + RTE_EDEV_LOG_ERR("Invalid rx queue_id %" 
PRIu16, rx_queue_id); + return -EINVAL; + } + + dev_info = &rx_adapter->eth_devices[eth_dev_id]; + + if (dev_info->rx_queue == NULL || + !dev_info->rx_queue[rx_queue_id].queue_enabled) { + RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id); + return -EINVAL; + } + + if (dev_info->internal_event_port == 0) { + queue_info = &dev_info->rx_queue[rx_queue_id]; + rxa_queue_stats_reset(queue_info); + } + + dev = &rte_eventdevs[rx_adapter->eventdev_id]; + if (dev->dev_ops->eth_rx_adapter_queue_stats_reset != NULL) { + return (*dev->dev_ops->eth_rx_adapter_queue_stats_reset)(dev, + &rte_eth_devices[eth_dev_id], + rx_queue_id); + } + return 0; } int rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id) { - struct rte_event_eth_rx_adapter *rx_adapter; + struct event_eth_rx_adapter *rx_adapter; if (rxa_memzone_lookup()) return -ENOMEM; @@ -2955,13 +3123,33 @@ rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id) return rx_adapter->service_inited ? 0 : -ESRCH; } +int +rte_event_eth_rx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id) +{ + struct event_eth_rx_adapter *rx_adapter; + + if (rxa_memzone_lookup()) + return -ENOMEM; + + RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + + rx_adapter = rxa_id_to_adapter(id); + if (rx_adapter == NULL || event_port_id == NULL) + return -EINVAL; + + if (rx_adapter->service_inited) + *event_port_id = rx_adapter->event_port_id; + + return rx_adapter->service_inited ? 0 : -ESRCH; +} + int rte_event_eth_rx_adapter_cb_register(uint8_t id, uint16_t eth_dev_id, rte_event_eth_rx_adapter_cb_fn cb_fn, void *cb_arg) { - struct rte_event_eth_rx_adapter *rx_adapter; + struct event_eth_rx_adapter *rx_adapter; struct eth_device_info *dev_info; uint32_t cap; int ret; @@ -3007,7 +3195,7 @@ rte_event_eth_rx_adapter_queue_conf_get(uint8_t id, struct rte_event_eth_rx_adapter_queue_conf *queue_conf) { struct rte_eventdev *dev; - struct rte_event_eth_rx_adapter *rx_adapter; + struct event_eth_rx_adapter *rx_adapter; struct eth_device_info *dev_info; struct eth_rx_queue_info *queue_info; struct rte_event *qi_ev; @@ -3170,7 +3358,7 @@ handle_rxa_get_queue_conf(const char *cmd __rte_unused, token = strtok(NULL, "\0"); if (token != NULL) RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev" - " telemetry command, igrnoring"); + " telemetry command, ignoring"); if (rte_event_eth_rx_adapter_queue_conf_get(rx_adapter_id, eth_dev_id, rx_queue_id, &queue_conf)) { @@ -3192,6 +3380,122 @@ handle_rxa_get_queue_conf(const char *cmd __rte_unused, return 0; } +static int +handle_rxa_get_queue_stats(const char *cmd __rte_unused, + const char *params, + struct rte_tel_data *d) +{ + uint8_t rx_adapter_id; + uint16_t rx_queue_id; + int eth_dev_id; + char *token, *l_params; + struct rte_event_eth_rx_adapter_queue_stats q_stats; + + if (params == NULL || strlen(params) == 0 || !isdigit(*params)) + return -1; + + /* Get Rx adapter ID from parameter string */ + l_params = strdup(params); + token = strtok(l_params, ","); + rx_adapter_id = strtoul(token, NULL, 10); + RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL); + + token = strtok(NULL, ","); + if (token == NULL || strlen(token) == 0 || !isdigit(*token)) + return -1; + + /* Get device ID from parameter string */ + eth_dev_id = strtoul(token, NULL, 10); + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(eth_dev_id, -EINVAL); + + token = strtok(NULL, ","); + if (token == NULL || strlen(token) == 0 || !isdigit(*token)) + return -1; + + /* Get Rx queue ID from parameter string */ + 
rx_queue_id = strtoul(token, NULL, 10); + if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) { + RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id); + return -EINVAL; + } + + token = strtok(NULL, "\0"); + if (token != NULL) + RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev" + " telemetry command, ignoring"); + + if (rte_event_eth_rx_adapter_queue_stats_get(rx_adapter_id, eth_dev_id, + rx_queue_id, &q_stats)) { + RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue stats"); + return -1; + } + + rte_tel_data_start_dict(d); + rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id); + rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id); + rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id); + RXA_ADD_DICT(q_stats, rx_event_buf_count); + RXA_ADD_DICT(q_stats, rx_event_buf_size); + RXA_ADD_DICT(q_stats, rx_poll_count); + RXA_ADD_DICT(q_stats, rx_packets); + RXA_ADD_DICT(q_stats, rx_dropped); + + return 0; +} + +static int +handle_rxa_queue_stats_reset(const char *cmd __rte_unused, + const char *params, + struct rte_tel_data *d __rte_unused) +{ + uint8_t rx_adapter_id; + uint16_t rx_queue_id; + int eth_dev_id; + char *token, *l_params; + + if (params == NULL || strlen(params) == 0 || !isdigit(*params)) + return -1; + + /* Get Rx adapter ID from parameter string */ + l_params = strdup(params); + token = strtok(l_params, ","); + rx_adapter_id = strtoul(token, NULL, 10); + RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL); + + token = strtok(NULL, ","); + if (token == NULL || strlen(token) == 0 || !isdigit(*token)) + return -1; + + /* Get device ID from parameter string */ + eth_dev_id = strtoul(token, NULL, 10); + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(eth_dev_id, -EINVAL); + + token = strtok(NULL, ","); + if (token == NULL || strlen(token) == 0 || !isdigit(*token)) + return -1; + + /* Get Rx queue ID from parameter string */ + rx_queue_id = strtoul(token, NULL, 10); + if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) { + RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id); + return -EINVAL; + } + + token = strtok(NULL, "\0"); + if (token != NULL) + RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev" + " telemetry command, ignoring"); + + if (rte_event_eth_rx_adapter_queue_stats_reset(rx_adapter_id, + eth_dev_id, + rx_queue_id)) { + RTE_EDEV_LOG_ERR("Failed to reset Rx adapter queue stats"); + return -1; + } + + return 0; +} + RTE_INIT(rxa_init_telemetry) { rte_telemetry_register_cmd("/eventdev/rxa_stats", @@ -3205,4 +3509,12 @@ RTE_INIT(rxa_init_telemetry) rte_telemetry_register_cmd("/eventdev/rxa_queue_conf", handle_rxa_get_queue_conf, "Returns Rx queue config. Parameter: rxa_id, dev_id, queue_id"); + + rte_telemetry_register_cmd("/eventdev/rxa_queue_stats", + handle_rxa_get_queue_stats, + "Returns Rx queue stats. Parameter: rxa_id, dev_id, queue_id"); + + rte_telemetry_register_cmd("/eventdev/rxa_queue_stats_reset", + handle_rxa_queue_stats_reset, + "Reset Rx queue stats. Parameter: rxa_id, dev_id, queue_id"); }
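
Usage sketch (not part of the patch above): the diff adds per-queue statistics to the Rx adapter, queried with rte_event_eth_rx_adapter_queue_stats_get() and cleared with rte_event_eth_rx_adapter_queue_stats_reset(). Both return -EINVAL unless the adapter keeps a separate event buffer per queue (use_queue_event_buf) and the queue has been added. The snippet below is a minimal, hypothetical example; RXA_ID, ETH_DEV_ID and RX_QUEUE_ID are placeholders, and it assumes the adapter was created with per-queue event buffers enabled (e.g. via rte_event_eth_rx_adapter_create_with_params()) and started elsewhere.

#include <stdio.h>
#include <inttypes.h>

#include <rte_event_eth_rx_adapter.h>

/* Placeholder ids: substitute the values from your own setup code. */
#define RXA_ID		0
#define ETH_DEV_ID	0
#define RX_QUEUE_ID	0

static int
rxa_dump_and_reset_queue_stats(void)
{
	struct rte_event_eth_rx_adapter_queue_stats q_stats;
	int ret;

	/* Returns -EINVAL if the adapter was not created with
	 * per-queue event buffers (use_queue_event_buf) or if the
	 * queue was never added to the adapter.
	 */
	ret = rte_event_eth_rx_adapter_queue_stats_get(RXA_ID, ETH_DEV_ID,
						       RX_QUEUE_ID, &q_stats);
	if (ret != 0)
		return ret;

	printf("dev %d queue %d: packets %" PRIu64 ", dropped %" PRIu64
	       ", polls %" PRIu64 ", buffered %" PRIu64 "/%" PRIu64 "\n",
	       ETH_DEV_ID, RX_QUEUE_ID,
	       q_stats.rx_packets, q_stats.rx_dropped,
	       q_stats.rx_poll_count,
	       q_stats.rx_event_buf_count, q_stats.rx_event_buf_size);

	/* Zero the software counters for this queue; the call also invokes
	 * the PMD's eth_rx_adapter_queue_stats_reset op when one exists.
	 */
	return rte_event_eth_rx_adapter_queue_stats_reset(RXA_ID, ETH_DEV_ID,
							  RX_QUEUE_ID);
}

The same counters are reachable without code through the telemetry commands registered above: /eventdev/rxa_queue_stats,<rxa_id>,<dev_id>,<queue_id> and /eventdev/rxa_queue_stats_reset,<rxa_id>,<dev_id>,<queue_id>.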