1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation.
10 #include <rte_cycles.h>
11 #include <rte_common.h>
13 #include <rte_errno.h>
14 #include <ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_service_component.h>
18 #include <rte_thash.h>
19 #include <rte_interrupts.h>
20 #include <rte_mbuf_dyn.h>
21 #include <rte_telemetry.h>
23 #include "rte_eventdev.h"
24 #include "eventdev_pmd.h"
25 #include "eventdev_trace.h"
26 #include "rte_event_eth_rx_adapter.h"
29 #define BLOCK_CNT_THRESHOLD 10
30 #define ETH_EVENT_BUFFER_SIZE (6*BATCH_SIZE)
31 #define MAX_VECTOR_SIZE 1024
32 #define MIN_VECTOR_SIZE 4
33 #define MAX_VECTOR_NS 1E9
34 #define MIN_VECTOR_NS 1E5
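/* Software vector limits: 4 to 1024 mbufs per vector, aggregation timeout
 * between 100 us (1E5 ns) and 1 s (1E9 ns).
 */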
36 #define ETH_RX_ADAPTER_SERVICE_NAME_LEN 32
37 #define ETH_RX_ADAPTER_MEM_NAME_LEN 32
39 #define RSS_KEY_SIZE 40
40 /* value written to intr thread pipe to signal thread exit */
41 #define ETH_BRIDGE_INTR_THREAD_EXIT 1
42 /* Sentinel value to detect initialized file handle */
#define INIT_FD			-1
45 #define RXA_ADAPTER_ARRAY "rte_event_eth_rx_adapter_array"
48 * Used to store port and queue ID of interrupting Rx queue
60 * There is an instance of this struct per polled Rx queue added to the
63 struct eth_rx_poll_entry {
64 /* Eth port to poll */
66 /* Eth rx queue to poll */
70 struct eth_rx_vector_data {
71 TAILQ_ENTRY(eth_rx_vector_data) next;
74 uint16_t max_vector_count;
77 uint64_t vector_timeout_ticks;
78 struct rte_mempool *vector_pool;
79 struct rte_event_vector *vector_ev;
80 } __rte_cache_aligned;
82 TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
84 /* One instance per adapter, or one per Rx queue when use_queue_event_buf is set */
85 struct eth_event_enqueue_buffer {
86 /* Count of events in this buffer */
88 /* Array of events in this buffer */
89 struct rte_event *events;
90 /* size of event buffer */
92 /* Event enqueue happens from head */
94 /* New packets from rte_eth_rx_burst are enqueued at the tail */
96 /* last element in the buffer before rollover */
101 struct event_eth_rx_adapter {
103 uint8_t rss_key_be[RSS_KEY_SIZE];
104 /* Event device identifier */
106 /* Event port identifier */
107 uint8_t event_port_id;
108 /* Flag indicating per rxq event buffer */
109 bool use_queue_event_buf;
110 /* Per ethernet device structure */
111 struct eth_device_info *eth_devices;
112 /* Lock to serialize config updates with service function */
113 rte_spinlock_t rx_lock;
114 /* Max mbufs processed in any service function invocation */
116 /* Receive queues that need to be polled */
117 struct eth_rx_poll_entry *eth_rx_poll;
118 /* Size of the eth_rx_poll array */
119 uint16_t num_rx_polled;
120 /* Weighted round robin schedule */
122 /* wrr_sched[] size */
124 /* Next entry in wrr[] to begin polling */
126 /* Event burst buffer */
127 struct eth_event_enqueue_buffer event_enqueue_buffer;
128 /* Vector enable flag */
130 /* Timestamp of previous vector expiry list traversal */
131 uint64_t prev_expiry_ts;
132 /* Minimum ticks to wait before traversing expiry list */
133 uint64_t vector_tmo_ticks;
135 struct eth_rx_vector_data_list vector_list;
136 /* Per adapter stats */
137 struct rte_event_eth_rx_adapter_stats stats;
138 /* Block count, counts up to BLOCK_CNT_THRESHOLD */
139 uint16_t enq_block_count;
141 uint64_t rx_enq_block_start_ts;
142 /* epoll fd used to wait for Rx interrupts */
144 /* Number of interrupt driven Rx queues */
145 uint32_t num_rx_intr;
146 /* Used to send <dev id, queue id> of interrupting Rx queues from
147 * the interrupt thread to the Rx thread
149 struct rte_ring *intr_ring;
150 /* Rx Queue data (dev id, queue id) for the last non-empty
154 /* queue_data is valid */
156 /* Interrupt ring lock, synchronizes Rx thread
157 * and interrupt thread
159 rte_spinlock_t intr_ring_lock;
160 /* Event array passed to rte_epoll_wait */
161 struct rte_epoll_event *epoll_events;
162 /* Count of interrupt vectors in use */
163 uint32_t num_intr_vec;
164 /* Thread blocked on Rx interrupts */
165 pthread_t rx_intr_thread;
166 /* Configuration callback for rte_service configuration */
167 rte_event_eth_rx_adapter_conf_cb conf_cb;
168 /* Configuration callback argument */
170 /* Set if default_cb is being used */
172 /* Service initialization state */
173 uint8_t service_inited;
174 /* Total count of Rx queues in adapter */
176 /* Memory allocation name */
177 char mem_name[ETH_RX_ADAPTER_MEM_NAME_LEN];
178 /* Socket identifier cached from eventdev */
180 /* Per adapter EAL service */
182 /* Adapter started flag */
186 } __rte_cache_aligned;
189 struct eth_device_info {
190 struct rte_eth_dev *dev;
191 struct eth_rx_queue_info *rx_queue;
193 rte_event_eth_rx_adapter_cb_fn cb_fn;
194 /* Rx callback argument */
196 /* Set if ethdev->eventdev packet transfer uses a
 * hardware mechanism
 */
199 uint8_t internal_event_port;
200 /* Set if the adapter is processing rx queues for
201 * this eth device and packet processing has been
202 * started, allows for the code to know if the PMD
203 * rx_adapter_stop callback needs to be invoked
205 uint8_t dev_rx_started;
206 /* Number of queues added for this device */
207 uint16_t nb_dev_queues;
208 /* Number of poll based queues
209 * If nb_rx_poll > 0, the start callback will
210 * be invoked if not already invoked
213 /* Number of interrupt based queues
214 * If nb_rx_intr > 0, the start callback will
215 * be invoked if not already invoked.
218 /* Number of queues that use the shared interrupt */
219 uint16_t nb_shared_intr;
220 /* sum(wrr(q)) for all queues within the device
221 * useful when deleting all device queues
224 /* Intr based queue index to start polling from, this is used
225 * if the number of shared interrupts is non-zero
228 /* Intr based queue indices */
229 uint16_t *intr_queue;
230 /* Device generates a per Rx queue interrupt for
231 * queue indices < RTE_MAX_RXTX_INTR_VEC_ID - 1
234 /* shared interrupt enabled */
235 int shared_intr_enabled;
239 struct eth_rx_queue_info {
240 int queue_enabled; /* True if added */
243 uint16_t wt; /* Polling weight */
244 uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */
246 struct eth_rx_vector_data vector_data;
247 struct eth_event_enqueue_buffer *event_buf;
250 static struct event_eth_rx_adapter **event_eth_rx_adapter;
252 /* Enable dynamic timestamp field in mbuf */
253 static uint64_t event_eth_rx_timestamp_dynflag;
254 static int event_eth_rx_timestamp_dynfield_offset = -1;
256 static inline rte_mbuf_timestamp_t *
257 rxa_timestamp_dynfield(struct rte_mbuf *mbuf)
259 return RTE_MBUF_DYNFIELD(mbuf,
260 event_eth_rx_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
264 rxa_validate_id(uint8_t id)
266 return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
269 static inline struct eth_event_enqueue_buffer *
270 rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
271 uint16_t rx_queue_id)
273 if (rx_adapter->use_queue_event_buf) {
274 struct eth_device_info *dev_info =
275 &rx_adapter->eth_devices[eth_dev_id];
276 return dev_info->rx_queue[rx_queue_id].event_buf;
278 return &rx_adapter->event_enqueue_buffer;
281 #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
282 if (!rxa_validate_id(id)) { \
283 RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
289 rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
291 return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
294 /* Greatest common divisor */
295 static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
299 return r ? rxa_gcd_u16(b, r) : b;
302 /* Returns the next queue in the polling sequence
304 * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
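 *
 * For example, with three polled queues A, B and C having weights 4, 3
 * and 2 (max_wt = 4, gcd = 1), successive calls return the sequence
 * A A B A B C A B C, i.e. each queue appears as many times as its weight.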
307 rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw,
308 struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
309 uint16_t gcd, int prev)
325 q = eth_rx_poll[i].eth_rx_qid;
326 d = eth_rx_poll[i].eth_dev_id;
327 w = rx_adapter->eth_devices[d].rx_queue[q].wt;
335 rxa_shared_intr(struct eth_device_info *dev_info,
340 if (dev_info->dev->intr_handle == NULL)
343 multi_intr_cap = rte_intr_cap_multiple(dev_info->dev->intr_handle);
344 return !multi_intr_cap ||
345 rx_queue_id >= RTE_MAX_RXTX_INTR_VEC_ID - 1;
349 rxa_intr_queue(struct eth_device_info *dev_info,
352 struct eth_rx_queue_info *queue_info;
354 queue_info = &dev_info->rx_queue[rx_queue_id];
355 return dev_info->rx_queue &&
356 !dev_info->internal_event_port &&
357 queue_info->queue_enabled && queue_info->wt == 0;
361 rxa_polled_queue(struct eth_device_info *dev_info,
364 struct eth_rx_queue_info *queue_info;
366 queue_info = &dev_info->rx_queue[rx_queue_id];
367 return !dev_info->internal_event_port &&
368 dev_info->rx_queue &&
369 queue_info->queue_enabled && queue_info->wt != 0;
372 /* Calculate change in number of vectors after Rx queue ID is added/deleted */
374 rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
380 nbq = dev_info->dev->data->nb_rx_queues;
381 n = 0; /* non shared count */
382 s = 0; /* shared count */
384 if (rx_queue_id == -1) {
385 for (i = 0; i < nbq; i++) {
386 if (!rxa_shared_intr(dev_info, i))
387 n += add ? !rxa_intr_queue(dev_info, i) :
388 rxa_intr_queue(dev_info, i);
390 s += add ? !rxa_intr_queue(dev_info, i) :
391 rxa_intr_queue(dev_info, i);
395 if ((add && dev_info->nb_shared_intr == 0) ||
396 (!add && dev_info->nb_shared_intr))
400 if (!rxa_shared_intr(dev_info, rx_queue_id))
401 n = add ? !rxa_intr_queue(dev_info, rx_queue_id) :
402 rxa_intr_queue(dev_info, rx_queue_id);
404 n = add ? !dev_info->nb_shared_intr :
405 dev_info->nb_shared_intr == 1;
411 /* Calculate nb_rx_intr after deleting interrupt mode rx queues
414 rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter,
415 struct eth_device_info *dev_info, int rx_queue_id,
416 uint32_t *nb_rx_intr)
420 if (rx_queue_id == -1)
421 intr_diff = dev_info->nb_rx_intr;
423 intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
425 *nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
428 /* Calculate nb_rx_* after adding interrupt mode rx queues; the newly
429 * added interrupt queues could currently be poll mode Rx queues
432 rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter,
433 struct eth_device_info *dev_info, int rx_queue_id,
434 uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
439 uint32_t wrr_len_diff;
441 if (rx_queue_id == -1) {
442 intr_diff = dev_info->dev->data->nb_rx_queues -
443 dev_info->nb_rx_intr;
444 poll_diff = dev_info->nb_rx_poll;
445 wrr_len_diff = dev_info->wrr_len;
447 intr_diff = !rxa_intr_queue(dev_info, rx_queue_id);
448 poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
449 wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
453 *nb_rx_intr = rx_adapter->num_rx_intr + intr_diff;
454 *nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
455 *nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
458 /* Calculate size of the eth_rx_poll and wrr_sched arrays
459 * after deleting poll mode rx queues
462 rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter,
463 struct eth_device_info *dev_info, int rx_queue_id,
464 uint32_t *nb_rx_poll, uint32_t *nb_wrr)
467 uint32_t wrr_len_diff;
469 if (rx_queue_id == -1) {
470 poll_diff = dev_info->nb_rx_poll;
471 wrr_len_diff = dev_info->wrr_len;
473 poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
474 wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
478 *nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
479 *nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
482 /* Calculate nb_rx_* after adding poll mode rx queues
485 rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter,
486 struct eth_device_info *dev_info, int rx_queue_id,
487 uint16_t wt, uint32_t *nb_rx_poll,
488 uint32_t *nb_rx_intr, uint32_t *nb_wrr)
492 uint32_t wrr_len_diff;
494 if (rx_queue_id == -1) {
495 intr_diff = dev_info->nb_rx_intr;
496 poll_diff = dev_info->dev->data->nb_rx_queues -
497 dev_info->nb_rx_poll;
498 wrr_len_diff = wt*dev_info->dev->data->nb_rx_queues
501 intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
502 poll_diff = !rxa_polled_queue(dev_info, rx_queue_id);
503 wrr_len_diff = rxa_polled_queue(dev_info, rx_queue_id) ?
504 wt - dev_info->rx_queue[rx_queue_id].wt :
508 *nb_rx_poll = rx_adapter->num_rx_polled + poll_diff;
509 *nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
510 *nb_wrr = rx_adapter->wrr_len + wrr_len_diff;
513 /* Calculate nb_rx_* after adding rx_queue_id */
515 rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter,
516 struct eth_device_info *dev_info, int rx_queue_id,
517 uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
521 rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
522 wt, nb_rx_poll, nb_rx_intr, nb_wrr);
524 rxa_calc_nb_post_add_intr(rx_adapter, dev_info, rx_queue_id,
525 nb_rx_poll, nb_rx_intr, nb_wrr);
528 /* Calculate nb_rx_* after deleting rx_queue_id */
530 rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter,
531 struct eth_device_info *dev_info, int rx_queue_id,
532 uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
535 rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
537 rxa_calc_nb_post_intr_del(rx_adapter, dev_info, rx_queue_id,
542 * Allocate the rx_poll array
544 static struct eth_rx_poll_entry *
545 rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled)
549 len = RTE_ALIGN(num_rx_polled * sizeof(*rx_adapter->eth_rx_poll),
550 RTE_CACHE_LINE_SIZE);
551 return rte_zmalloc_socket(rx_adapter->mem_name,
554 rx_adapter->socket_id);
558 * Allocate the WRR array
561 rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr)
565 len = RTE_ALIGN(nb_wrr * sizeof(*rx_adapter->wrr_sched),
566 RTE_CACHE_LINE_SIZE);
567 return rte_zmalloc_socket(rx_adapter->mem_name,
570 rx_adapter->socket_id);
574 rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll,
575 uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll,
576 uint32_t **wrr_sched)
585 *rx_poll = rxa_alloc_poll(rx_adapter, nb_poll);
586 if (*rx_poll == NULL) {
591 *wrr_sched = rxa_alloc_wrr(rx_adapter, nb_wrr);
592 if (*wrr_sched == NULL) {
599 /* Precalculate WRR polling sequence for all queues in rx_adapter */
601 rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter,
602 struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr)
610 /* Initialize variables for calculation of wrr schedule */
611 uint16_t max_wrr_pos = 0;
612 unsigned int poll_q = 0;
619 /* Generate array of all queues to poll, the size of this
622 RTE_ETH_FOREACH_DEV(d) {
623 uint16_t nb_rx_queues;
624 struct eth_device_info *dev_info =
625 &rx_adapter->eth_devices[d];
626 nb_rx_queues = dev_info->dev->data->nb_rx_queues;
627 if (dev_info->rx_queue == NULL)
629 if (dev_info->internal_event_port)
631 dev_info->wrr_len = 0;
632 for (q = 0; q < nb_rx_queues; q++) {
633 struct eth_rx_queue_info *queue_info =
634 &dev_info->rx_queue[q];
637 if (!rxa_polled_queue(dev_info, q))
640 rx_poll[poll_q].eth_dev_id = d;
641 rx_poll[poll_q].eth_rx_qid = q;
643 dev_info->wrr_len += wt;
644 max_wt = RTE_MAX(max_wt, wt);
645 gcd = (gcd) ? rxa_gcd_u16(gcd, wt) : wt;
650 /* Generate polling sequence based on weights */
653 for (i = 0; i < max_wrr_pos; i++) {
654 rx_wrr[i] = rxa_wrr_next(rx_adapter, poll_q, &cw,
655 rx_poll, max_wt, gcd, prev);
661 rxa_mtoip(struct rte_mbuf *m, struct rte_ipv4_hdr **ipv4_hdr,
662 struct rte_ipv6_hdr **ipv6_hdr)
664 struct rte_ether_hdr *eth_hdr =
665 rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
666 struct rte_vlan_hdr *vlan_hdr;
671 switch (eth_hdr->ether_type) {
672 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
673 *ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
676 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
677 *ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
680 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
681 vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
682 switch (vlan_hdr->eth_proto) {
683 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
684 *ipv4_hdr = (struct rte_ipv4_hdr *)(vlan_hdr + 1);
686 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
687 *ipv6_hdr = (struct rte_ipv6_hdr *)(vlan_hdr + 1);
699 /* Calculate RSS hash for IPv4/6 */
700 static inline uint32_t
701 rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
705 struct rte_ipv4_tuple ipv4_tuple;
706 struct rte_ipv6_tuple ipv6_tuple;
707 struct rte_ipv4_hdr *ipv4_hdr;
708 struct rte_ipv6_hdr *ipv6_hdr;
710 rxa_mtoip(m, &ipv4_hdr, &ipv6_hdr);
713 ipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);
714 ipv4_tuple.dst_addr = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
716 input_len = RTE_THASH_V4_L3_LEN;
717 } else if (ipv6_hdr) {
718 rte_thash_load_v6_addrs(ipv6_hdr,
719 (union rte_thash_tuple *)&ipv6_tuple);
721 input_len = RTE_THASH_V6_L3_LEN;
725 return rte_softrss_be(tuple, input_len, rss_key_be);
729 rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter)
731 return !!rx_adapter->enq_block_count;
735 rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter)
737 if (rx_adapter->rx_enq_block_start_ts)
740 rx_adapter->enq_block_count++;
741 if (rx_adapter->enq_block_count < BLOCK_CNT_THRESHOLD)
744 rx_adapter->rx_enq_block_start_ts = rte_get_tsc_cycles();
748 rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
749 struct rte_event_eth_rx_adapter_stats *stats)
751 if (unlikely(!stats->rx_enq_start_ts))
752 stats->rx_enq_start_ts = rte_get_tsc_cycles();
754 if (likely(!rxa_enq_blocked(rx_adapter)))
757 rx_adapter->enq_block_count = 0;
758 if (rx_adapter->rx_enq_block_start_ts) {
759 stats->rx_enq_end_ts = rte_get_tsc_cycles();
760 stats->rx_enq_block_cycles += stats->rx_enq_end_ts -
761 rx_adapter->rx_enq_block_start_ts;
762 rx_adapter->rx_enq_block_start_ts = 0;
766 /* Enqueue buffered events to event device */
767 static inline uint16_t
768 rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter,
769 struct eth_event_enqueue_buffer *buf)
771 struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
772 uint16_t count = buf->last ? buf->last - buf->head : buf->count;
777 uint16_t n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
778 rx_adapter->event_port_id,
779 &buf->events[buf->head],
782 stats->rx_enq_retry++;
786 if (buf->last && n == count) {
789 n1 = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
790 rx_adapter->event_port_id,
795 stats->rx_enq_retry++;
803 n ? rxa_enq_block_end_ts(rx_adapter, stats) :
804 rxa_enq_block_start_ts(rx_adapter);
807 stats->rx_enq_count += n;
813 rxa_init_vector(struct event_eth_rx_adapter *rx_adapter,
814 struct eth_rx_vector_data *vec)
816 vec->vector_ev->nb_elem = 0;
817 vec->vector_ev->port = vec->port;
818 vec->vector_ev->queue = vec->queue;
819 vec->vector_ev->attr_valid = true;
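/* Add the vector to the adapter's expiry list so the service function
 * can flush it once its timeout elapses.
 */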
820 TAILQ_INSERT_TAIL(&rx_adapter->vector_list, vec, next);
823 static inline uint16_t
824 rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
825 struct eth_rx_queue_info *queue_info,
826 struct eth_event_enqueue_buffer *buf,
827 struct rte_mbuf **mbufs, uint16_t num)
829 struct rte_event *ev = &buf->events[buf->count];
830 struct eth_rx_vector_data *vec;
831 uint16_t filled, space, sz;
834 vec = &queue_info->vector_data;
836 if (vec->vector_ev == NULL) {
837 if (rte_mempool_get(vec->vector_pool,
838 (void **)&vec->vector_ev) < 0) {
839 rte_pktmbuf_free_bulk(mbufs, num);
842 rxa_init_vector(rx_adapter, vec);
845 if (vec->vector_ev->nb_elem == vec->max_vector_count) {
847 ev->event = vec->event;
848 ev->vec = vec->vector_ev;
851 vec->vector_ev = NULL;
852 TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
853 if (rte_mempool_get(vec->vector_pool,
854 (void **)&vec->vector_ev) < 0) {
855 rte_pktmbuf_free_bulk(mbufs, num);
858 rxa_init_vector(rx_adapter, vec);
861 space = vec->max_vector_count - vec->vector_ev->nb_elem;
862 sz = num > space ? space : num;
863 memcpy(vec->vector_ev->mbufs + vec->vector_ev->nb_elem, mbufs,
864 sizeof(void *) * sz);
865 vec->vector_ev->nb_elem += sz;
868 vec->ts = rte_rdtsc();
871 if (vec->vector_ev->nb_elem == vec->max_vector_count) {
872 ev->event = vec->event;
873 ev->vec = vec->vector_ev;
876 vec->vector_ev = NULL;
877 TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
884 rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
885 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num,
886 struct eth_event_enqueue_buffer *buf)
889 struct eth_device_info *dev_info =
890 &rx_adapter->eth_devices[eth_dev_id];
891 struct eth_rx_queue_info *eth_rx_queue_info =
892 &dev_info->rx_queue[rx_queue_id];
893 uint16_t new_tail = buf->tail;
894 uint64_t event = eth_rx_queue_info->event;
895 uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
896 struct rte_mbuf *m = mbufs[0];
902 uint64_t ts, ts_mask;
904 if (!eth_rx_queue_info->ena_vector) {
905 ts = m->ol_flags & event_eth_rx_timestamp_dynflag ?
906 0 : rte_get_tsc_cycles();
908 /* 0xffff ffff ffff ffff if RTE_MBUF_F_RX_TIMESTAMP is set,
 * otherwise 0
 */
911 ts_mask = (uint64_t)(!(m->ol_flags &
912 event_eth_rx_timestamp_dynflag)) - 1ULL;
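/* Together, ts and ts_mask either preserve a timestamp already set by
 * the driver or stamp the mbuf with the current TSC value in the loop
 * below.
 */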
914 /* 0xffff ffff if RTE_MBUF_F_RX_RSS_HASH is set, otherwise 0 */
915 rss_mask = ~(((m->ol_flags & RTE_MBUF_F_RX_RSS_HASH) != 0) - 1);
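/* Compute a software RSS hash only when the NIC did not provide one
 * and the application did not supply its own flow id
 * (flow_id_mask == 0).
 */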
916 do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
917 for (i = 0; i < num; i++) {
918 struct rte_event *ev;
921 *rxa_timestamp_dynfield(m) = ts |
922 (*rxa_timestamp_dynfield(m) & ts_mask);
924 ev = &buf->events[new_tail];
926 rss = do_rss ? rxa_do_softrss(m, rx_adapter->rss_key_be)
929 ev->flow_id = (rss & ~flow_id_mask) |
930 (ev->flow_id & flow_id_mask);
935 num = rxa_create_event_vector(rx_adapter, eth_rx_queue_info,
939 if (num && dev_info->cb_fn) {
942 nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id,
944 (buf->events_size & ~buf->last_mask),
945 buf->count >= BATCH_SIZE ?
946 buf->count - BATCH_SIZE : 0,
947 &buf->events[buf->tail],
951 if (unlikely(nb_cb > num))
952 RTE_EDEV_LOG_ERR("Rx CB returned %d (> %d) events",
957 rx_adapter->stats.rx_dropped += dropped;
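/* Check whether the event enqueue buffer has room for another BATCH_SIZE
 * events; buf->last records the rollover point when the buffer wraps to
 * reuse space already drained from the head.
 */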
965 rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf)
967 uint32_t nb_req = buf->tail + BATCH_SIZE;
970 if (nb_req <= buf->events_size)
973 if (buf->head >= BATCH_SIZE) {
975 buf->last = buf->tail;
981 return nb_req <= buf->head;
984 /* Enqueue packets from <port, q> to event buffer */
985 static inline uint32_t
986 rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
987 uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
988 int *rxq_empty, struct eth_event_enqueue_buffer *buf)
990 struct rte_mbuf *mbufs[BATCH_SIZE];
991 struct rte_event_eth_rx_adapter_stats *stats =
998 /* Don't do a batch dequeue from the rx queue if there isn't
999 * enough space in the enqueue buffer.
1001 while (rxa_pkt_buf_available(buf)) {
1002 if (buf->count >= BATCH_SIZE)
1003 rxa_flush_event_buffer(rx_adapter, buf);
1005 stats->rx_poll_count++;
1006 n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
1012 rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf);
1014 if (rx_count + nb_rx > max_rx)
1019 rxa_flush_event_buffer(rx_adapter, buf);
1025 rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data)
1030 union queue_data qd;
1031 struct eth_device_info *dev_info;
1032 struct eth_rx_queue_info *queue_info;
1039 dev_info = &rx_adapter->eth_devices[port_id];
1040 queue_info = &dev_info->rx_queue[queue];
1041 rte_spinlock_lock(&rx_adapter->intr_ring_lock);
1042 if (rxa_shared_intr(dev_info, queue))
1043 intr_enabled = &dev_info->shared_intr_enabled;
1045 intr_enabled = &queue_info->intr_enabled;
1047 if (*intr_enabled) {
1049 err = rte_ring_enqueue(rx_adapter->intr_ring, data);
1050 /* Entry should always be available.
1051 * The ring size equals the maximum number of interrupt
1052 * vectors supported (an interrupt vector is shared in
1053 * case of shared interrupts)
1056 RTE_EDEV_LOG_ERR("Failed to enqueue interrupt"
1057 " to ring: %s", strerror(-err));
1059 rte_eth_dev_rx_intr_disable(port_id, queue);
1061 rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
1065 rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter,
1066 uint32_t num_intr_vec)
1068 if (rx_adapter->num_intr_vec + num_intr_vec >
1069 RTE_EVENT_ETH_INTR_RING_SIZE) {
1070 RTE_EDEV_LOG_ERR("Exceeded intr ring slots current"
1071 " %d needed %d limit %d", rx_adapter->num_intr_vec,
1072 num_intr_vec, RTE_EVENT_ETH_INTR_RING_SIZE);
1079 /* Delete entries for (dev, queue) from the interrupt ring */
1081 rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter,
1082 struct eth_device_info *dev_info,
1083 uint16_t rx_queue_id)
1086 union queue_data qd;
1088 rte_spinlock_lock(&rx_adapter->intr_ring_lock);
1090 n = rte_ring_count(rx_adapter->intr_ring);
1091 for (i = 0; i < n; i++) {
1092 rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
1093 if (!rxa_shared_intr(dev_info, rx_queue_id)) {
1094 if (qd.port == dev_info->dev->data->port_id &&
1095 qd.queue == rx_queue_id)
1098 if (qd.port == dev_info->dev->data->port_id)
1101 rte_ring_enqueue(rx_adapter->intr_ring, qd.ptr);
1104 rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
1107 /* pthread callback handling interrupt mode receive queues
1108 * After receiving an Rx interrupt, it enqueues the port id and queue id of the
1109 * interrupting queue to the adapter's ring buffer for interrupt events.
1110 * These events are picked up by rxa_intr_ring_dequeue() which is invoked from
1111 * the adapter service function.
1114 rxa_intr_thread(void *arg)
1116 struct event_eth_rx_adapter *rx_adapter = arg;
1117 struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
1121 n = rte_epoll_wait(rx_adapter->epd, epoll_events,
1122 RTE_EVENT_ETH_INTR_RING_SIZE, -1);
1123 if (unlikely(n < 0))
1124 RTE_EDEV_LOG_ERR("rte_epoll_wait returned error %d",
1126 for (i = 0; i < n; i++) {
1127 rxa_intr_ring_enqueue(rx_adapter,
1128 epoll_events[i].epdata.data);
1135 /* Dequeue <port, q> from interrupt ring and enqueue received
 * packets to the event buffer
 */
1138 static inline uint32_t
1139 rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
1144 struct eth_event_enqueue_buffer *buf;
1145 rte_spinlock_t *ring_lock;
1146 uint8_t max_done = 0;
1148 if (rx_adapter->num_rx_intr == 0)
1151 if (rte_ring_count(rx_adapter->intr_ring) == 0
1152 && !rx_adapter->qd_valid)
1155 buf = &rx_adapter->event_enqueue_buffer;
1156 ring_lock = &rx_adapter->intr_ring_lock;
1158 if (buf->count >= BATCH_SIZE)
1159 rxa_flush_event_buffer(rx_adapter, buf);
1161 while (rxa_pkt_buf_available(buf)) {
1162 struct eth_device_info *dev_info;
1165 union queue_data qd = rx_adapter->qd;
1168 if (!rx_adapter->qd_valid) {
1169 struct eth_rx_queue_info *queue_info;
1171 rte_spinlock_lock(ring_lock);
1172 err = rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
1174 rte_spinlock_unlock(ring_lock);
1180 rx_adapter->qd = qd;
1181 rx_adapter->qd_valid = 1;
1182 dev_info = &rx_adapter->eth_devices[port];
1183 if (rxa_shared_intr(dev_info, queue))
1184 dev_info->shared_intr_enabled = 1;
1186 queue_info = &dev_info->rx_queue[queue];
1187 queue_info->intr_enabled = 1;
1189 rte_eth_dev_rx_intr_enable(port, queue);
1190 rte_spinlock_unlock(ring_lock);
1195 dev_info = &rx_adapter->eth_devices[port];
1198 if (rxa_shared_intr(dev_info, queue)) {
1202 nb_queues = dev_info->dev->data->nb_rx_queues;
1204 for (i = dev_info->next_q_idx; i < nb_queues; i++) {
1205 uint8_t enq_buffer_full;
1207 if (!rxa_intr_queue(dev_info, i))
1209 n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
1210 rx_adapter->max_nb_rx,
1214 enq_buffer_full = !rxq_empty && n == 0;
1215 max_done = nb_rx > rx_adapter->max_nb_rx;
1217 if (enq_buffer_full || max_done) {
1218 dev_info->next_q_idx = i;
1223 rx_adapter->qd_valid = 0;
1225 /* Reinitialize for next interrupt */
1226 dev_info->next_q_idx = dev_info->multi_intr_cap ?
1227 RTE_MAX_RXTX_INTR_VEC_ID - 1 :
1230 n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
1231 rx_adapter->max_nb_rx,
1233 rx_adapter->qd_valid = !rxq_empty;
1235 if (nb_rx > rx_adapter->max_nb_rx)
1241 rx_adapter->stats.rx_intr_packets += nb_rx;
1246 * Polls receive queues added to the event adapter and enqueues received
1247 * packets to the event device.
1249 * The receive code enqueues initially to a temporary buffer, the
1250 * temporary buffer is drained anytime it holds >= BATCH_SIZE packets
1252 * If there isn't space available in the temporary buffer, packets from the
1253 * Rx queue aren't dequeued from the eth device, this back pressures the
1254 * eth device, in virtual device environments this back pressure is relayed to
1255 * the hypervisor's switching layer where adjustments can be made to deal with
1258 static inline uint32_t
1259 rxa_poll(struct event_eth_rx_adapter *rx_adapter)
1263 struct eth_event_enqueue_buffer *buf = NULL;
1267 wrr_pos = rx_adapter->wrr_pos;
1268 max_nb_rx = rx_adapter->max_nb_rx;
1270 /* Iterate through a WRR sequence */
1271 for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
1272 unsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];
1273 uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
1274 uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
1276 buf = rxa_event_buf_get(rx_adapter, d, qid);
1278 /* Don't do a batch dequeue from the rx queue if there isn't
1279 * enough space in the enqueue buffer.
1281 if (buf->count >= BATCH_SIZE)
1282 rxa_flush_event_buffer(rx_adapter, buf);
1283 if (!rxa_pkt_buf_available(buf)) {
1284 if (rx_adapter->use_queue_event_buf)
1285 goto poll_next_entry;
1287 rx_adapter->wrr_pos = wrr_pos;
1292 nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
1294 if (nb_rx > max_nb_rx) {
1295 rx_adapter->wrr_pos =
1296 (wrr_pos + 1) % rx_adapter->wrr_len;
1301 if (++wrr_pos == rx_adapter->wrr_len)
1308 rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
1310 struct event_eth_rx_adapter *rx_adapter = arg;
1311 struct eth_event_enqueue_buffer *buf = NULL;
1312 struct rte_event *ev;
1314 buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue);
1317 rxa_flush_event_buffer(rx_adapter, buf);
1319 if (vec->vector_ev->nb_elem == 0)
1321 ev = &buf->events[buf->count];
1324 ev->event = vec->event;
1325 ev->vec = vec->vector_ev;
1328 vec->vector_ev = NULL;
1333 rxa_service_func(void *args)
1335 struct event_eth_rx_adapter *rx_adapter = args;
1336 struct rte_event_eth_rx_adapter_stats *stats;
1338 if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
1340 if (!rx_adapter->rxa_started) {
1341 rte_spinlock_unlock(&rx_adapter->rx_lock);
1345 if (rx_adapter->ena_vector) {
1346 if ((rte_rdtsc() - rx_adapter->prev_expiry_ts) >=
1347 rx_adapter->vector_tmo_ticks) {
1348 struct eth_rx_vector_data *vec;
1350 TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {
1351 uint64_t elapsed_time = rte_rdtsc() - vec->ts;
1353 if (elapsed_time >= vec->vector_timeout_ticks) {
1354 rxa_vector_expire(vec, rx_adapter);
1355 TAILQ_REMOVE(&rx_adapter->vector_list,
1359 rx_adapter->prev_expiry_ts = rte_rdtsc();
1363 stats = &rx_adapter->stats;
1364 stats->rx_packets += rxa_intr_ring_dequeue(rx_adapter);
1365 stats->rx_packets += rxa_poll(rx_adapter);
1366 rte_spinlock_unlock(&rx_adapter->rx_lock);
1371 rte_event_eth_rx_adapter_init(void)
1373 const char *name = RXA_ADAPTER_ARRAY;
1374 const struct rte_memzone *mz;
1377 sz = sizeof(*event_eth_rx_adapter) *
1378 RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
1379 sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
1381 mz = rte_memzone_lookup(name);
1383 mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
1384 RTE_CACHE_LINE_SIZE);
1386 RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
1392 event_eth_rx_adapter = mz->addr;
1397 rxa_memzone_lookup(void)
1399 const struct rte_memzone *mz;
1401 if (event_eth_rx_adapter == NULL) {
1402 mz = rte_memzone_lookup(RXA_ADAPTER_ARRAY);
1405 event_eth_rx_adapter = mz->addr;
1411 static inline struct event_eth_rx_adapter *
1412 rxa_id_to_adapter(uint8_t id)
1414 return event_eth_rx_adapter ?
1415 event_eth_rx_adapter[id] : NULL;
1419 rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
1420 struct rte_event_eth_rx_adapter_conf *conf, void *arg)
1423 struct rte_eventdev *dev;
1424 struct rte_event_dev_config dev_conf;
1427 struct rte_event_port_conf *port_conf = arg;
1428 struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
1430 dev = &rte_eventdevs[rx_adapter->eventdev_id];
1431 dev_conf = dev->data->dev_conf;
1433 started = dev->data->dev_started;
1435 rte_event_dev_stop(dev_id);
1436 port_id = dev_conf.nb_event_ports;
1437 dev_conf.nb_event_ports += 1;
1438 ret = rte_event_dev_configure(dev_id, &dev_conf);
1440 RTE_EDEV_LOG_ERR("failed to configure event dev %u\n",
1443 if (rte_event_dev_start(dev_id))
1449 ret = rte_event_port_setup(dev_id, port_id, port_conf);
1451 RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
1456 conf->event_port_id = port_id;
1457 conf->max_nb_rx = 128;
1459 ret = rte_event_dev_start(dev_id);
1460 rx_adapter->default_cb_arg = 1;
1465 rxa_epoll_create1(void)
1469 fd = epoll_create1(EPOLL_CLOEXEC);
1470 return fd < 0 ? -errno : fd;
1477 rxa_init_epd(struct event_eth_rx_adapter *rx_adapter)
1479 if (rx_adapter->epd != INIT_FD)
1482 rx_adapter->epd = rxa_epoll_create1();
1483 if (rx_adapter->epd < 0) {
1484 int err = rx_adapter->epd;
1485 rx_adapter->epd = INIT_FD;
1486 RTE_EDEV_LOG_ERR("epoll_create1() failed, err %d", err);
1494 rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
1497 char thread_name[RTE_MAX_THREAD_NAME_LEN];
1499 if (rx_adapter->intr_ring)
1502 rx_adapter->intr_ring = rte_ring_create("intr_ring",
1503 RTE_EVENT_ETH_INTR_RING_SIZE,
1504 rte_socket_id(), 0);
1505 if (!rx_adapter->intr_ring)
1508 rx_adapter->epoll_events = rte_zmalloc_socket(rx_adapter->mem_name,
1509 RTE_EVENT_ETH_INTR_RING_SIZE *
1510 sizeof(struct rte_epoll_event),
1511 RTE_CACHE_LINE_SIZE,
1512 rx_adapter->socket_id);
1513 if (!rx_adapter->epoll_events) {
1518 rte_spinlock_init(&rx_adapter->intr_ring_lock);
1520 snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
1521 "rx-intr-thread-%d", rx_adapter->id);
1523 err = rte_ctrl_thread_create(&rx_adapter->rx_intr_thread, thread_name,
1524 NULL, rxa_intr_thread, rx_adapter);
1528 RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d\n", err);
1529 rte_free(rx_adapter->epoll_events);
1531 rte_ring_free(rx_adapter->intr_ring);
1532 rx_adapter->intr_ring = NULL;
1533 rx_adapter->epoll_events = NULL;
1538 rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
1542 err = pthread_cancel(rx_adapter->rx_intr_thread);
1544 RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d\n",
1547 err = pthread_join(rx_adapter->rx_intr_thread, NULL);
1549 RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d\n", err);
1551 rte_free(rx_adapter->epoll_events);
1552 rte_ring_free(rx_adapter->intr_ring);
1553 rx_adapter->intr_ring = NULL;
1554 rx_adapter->epoll_events = NULL;
1559 rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter)
1563 if (rx_adapter->num_rx_intr == 0)
1566 ret = rxa_destroy_intr_thread(rx_adapter);
1570 close(rx_adapter->epd);
1571 rx_adapter->epd = INIT_FD;
1577 rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter,
1578 struct eth_device_info *dev_info, uint16_t rx_queue_id)
1581 uint16_t eth_dev_id = dev_info->dev->data->port_id;
1582 int sintr = rxa_shared_intr(dev_info, rx_queue_id);
1584 err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
1586 RTE_EDEV_LOG_ERR("Could not disable interrupt for Rx queue %u",
1591 err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
1596 RTE_EDEV_LOG_ERR("Interrupt event deletion failed %d", err);
1599 dev_info->rx_queue[rx_queue_id].intr_enabled = 0;
1601 dev_info->shared_intr_enabled = 0;
1606 rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
1607 struct eth_device_info *dev_info, int rx_queue_id)
1613 if (dev_info->nb_rx_intr == 0)
1617 if (rx_queue_id == -1) {
1618 s = dev_info->nb_shared_intr;
1619 for (i = 0; i < dev_info->nb_rx_intr; i++) {
1623 q = dev_info->intr_queue[i];
1624 sintr = rxa_shared_intr(dev_info, q);
1627 if (!sintr || s == 0) {
1629 err = rxa_disable_intr(rx_adapter, dev_info,
1633 rxa_intr_ring_del_entries(rx_adapter, dev_info,
1638 if (!rxa_intr_queue(dev_info, rx_queue_id))
1640 if (!rxa_shared_intr(dev_info, rx_queue_id) ||
1641 dev_info->nb_shared_intr == 1) {
1642 err = rxa_disable_intr(rx_adapter, dev_info,
1646 rxa_intr_ring_del_entries(rx_adapter, dev_info,
1650 for (i = 0; i < dev_info->nb_rx_intr; i++) {
1651 if (dev_info->intr_queue[i] == rx_queue_id) {
1652 for (; i < dev_info->nb_rx_intr - 1; i++)
1653 dev_info->intr_queue[i] =
1654 dev_info->intr_queue[i + 1];
1664 rxa_config_intr(struct event_eth_rx_adapter *rx_adapter,
1665 struct eth_device_info *dev_info, uint16_t rx_queue_id)
1668 uint16_t eth_dev_id = dev_info->dev->data->port_id;
1669 union queue_data qd;
1671 uint16_t *intr_queue;
1672 int sintr = rxa_shared_intr(dev_info, rx_queue_id);
1674 if (rxa_intr_queue(dev_info, rx_queue_id))
1677 intr_queue = dev_info->intr_queue;
1678 if (dev_info->intr_queue == NULL) {
1680 dev_info->dev->data->nb_rx_queues * sizeof(uint16_t);
1681 dev_info->intr_queue =
1683 rx_adapter->mem_name,
1686 rx_adapter->socket_id);
1687 if (dev_info->intr_queue == NULL)
1691 init_fd = rx_adapter->epd;
1692 err = rxa_init_epd(rx_adapter);
1694 goto err_free_queue;
1696 qd.port = eth_dev_id;
1697 qd.queue = rx_queue_id;
1699 err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
1704 RTE_EDEV_LOG_ERR("Failed to add interrupt event for"
1705 " Rx Queue %u err %d", rx_queue_id, err);
1709 err = rte_eth_dev_rx_intr_enable(eth_dev_id, rx_queue_id);
1711 RTE_EDEV_LOG_ERR("Could not enable interrupt for"
1712 " Rx Queue %u err %d", rx_queue_id, err);
1717 err = rxa_create_intr_thread(rx_adapter);
1720 dev_info->shared_intr_enabled = 1;
1722 dev_info->rx_queue[rx_queue_id].intr_enabled = 1;
1727 err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
1729 RTE_EDEV_LOG_ERR("Could not disable interrupt for"
1730 " Rx Queue %u err %d", rx_queue_id, err);
1732 err1 = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
1737 RTE_EDEV_LOG_ERR("Could not delete event for"
1738 " Rx Queue %u err %d", rx_queue_id, err1);
1741 if (init_fd == INIT_FD) {
1742 close(rx_adapter->epd);
1743 rx_adapter->epd = -1;
1746 if (intr_queue == NULL)
1747 rte_free(dev_info->intr_queue);
1753 rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
1754 struct eth_device_info *dev_info, int rx_queue_id)
1759 int shared_done = (dev_info->nb_shared_intr > 0);
1761 if (rx_queue_id != -1) {
1762 if (rxa_shared_intr(dev_info, rx_queue_id) && shared_done)
1764 return rxa_config_intr(rx_adapter, dev_info, rx_queue_id);
1768 for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++) {
1770 if (rxa_shared_intr(dev_info, i) && shared_done)
1773 err = rxa_config_intr(rx_adapter, dev_info, i);
1775 shared_done = err == 0 && rxa_shared_intr(dev_info, i);
1778 dev_info->shared_intr_enabled = 1;
1787 shared_done = (dev_info->nb_shared_intr > 0);
1788 for (j = 0; j < i; j++) {
1789 if (rxa_intr_queue(dev_info, j))
1791 if (rxa_shared_intr(dev_info, j) && si != j)
1793 err = rxa_disable_intr(rx_adapter, dev_info, j);
1803 rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
1806 struct rte_service_spec service;
1807 struct rte_event_eth_rx_adapter_conf rx_adapter_conf;
1809 if (rx_adapter->service_inited)
1812 memset(&service, 0, sizeof(service));
1813 snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN,
1814 "rte_event_eth_rx_adapter_%d", id);
1815 service.socket_id = rx_adapter->socket_id;
1816 service.callback = rxa_service_func;
1817 service.callback_userdata = rx_adapter;
1818 /* Service function handles locking for queue add/del updates */
1819 service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
1820 ret = rte_service_component_register(&service, &rx_adapter->service_id);
1822 RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
1827 ret = rx_adapter->conf_cb(id, rx_adapter->eventdev_id,
1828 &rx_adapter_conf, rx_adapter->conf_arg);
1830 RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
1834 rx_adapter->event_port_id = rx_adapter_conf.event_port_id;
1835 rx_adapter->max_nb_rx = rx_adapter_conf.max_nb_rx;
1836 rx_adapter->service_inited = 1;
1837 rx_adapter->epd = INIT_FD;
1841 rte_service_component_unregister(rx_adapter->service_id);
1846 rxa_update_queue(struct event_eth_rx_adapter *rx_adapter,
1847 struct eth_device_info *dev_info, int32_t rx_queue_id,
1850 struct eth_rx_queue_info *queue_info;
1854 if (dev_info->rx_queue == NULL)
1857 if (rx_queue_id == -1) {
1858 for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
1859 rxa_update_queue(rx_adapter, dev_info, i, add);
1861 queue_info = &dev_info->rx_queue[rx_queue_id];
1862 enabled = queue_info->queue_enabled;
1864 rx_adapter->nb_queues += !enabled;
1865 dev_info->nb_dev_queues += !enabled;
1867 rx_adapter->nb_queues -= enabled;
1868 dev_info->nb_dev_queues -= enabled;
1870 queue_info->queue_enabled = !!add;
1875 rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
1876 uint64_t vector_ns, struct rte_mempool *mp, uint32_t qid,
1879 #define NSEC2TICK(__ns, __freq) (((__ns) * (__freq)) / 1E9)
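/* NSEC2TICK converts a nanosecond duration to timer ticks at the given
 * frequency in Hz, e.g. 100000 ns at a 2 GHz timer is 200000 ticks.
 */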
1880 struct eth_rx_vector_data *vector_data;
1883 vector_data = &queue_info->vector_data;
1884 vector_data->max_vector_count = vector_count;
1885 vector_data->port = port_id;
1886 vector_data->queue = qid;
1887 vector_data->vector_pool = mp;
1888 vector_data->vector_timeout_ticks =
1889 NSEC2TICK(vector_ns, rte_get_timer_hz());
1890 vector_data->ts = 0;
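/* The low 20 bits of the event word carry the flow id. If the queue
 * configuration left it at zero, synthesize one from the queue id
 * (bits 0-11) and the port id (bits 12-19).
 */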
1891 flow_id = queue_info->event & 0xFFFFF;
1893 flow_id == 0 ? (qid & 0xFFF) | (port_id & 0xFF) << 12 : flow_id;
1894 vector_data->event = (queue_info->event & ~0xFFFFF) | flow_id;
1898 rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
1899 struct eth_device_info *dev_info, int32_t rx_queue_id)
1901 struct eth_rx_vector_data *vec;
1907 if (rx_adapter->nb_queues == 0)
1910 if (rx_queue_id == -1) {
1911 uint16_t nb_rx_queues;
1914 nb_rx_queues = dev_info->dev->data->nb_rx_queues;
1915 for (i = 0; i < nb_rx_queues; i++)
1916 rxa_sw_del(rx_adapter, dev_info, i);
1920 /* Push all the partial event vectors to event device. */
1921 TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {
1922 if (vec->queue != rx_queue_id)
1924 rxa_vector_expire(vec, rx_adapter);
1925 TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
1928 pollq = rxa_polled_queue(dev_info, rx_queue_id);
1929 intrq = rxa_intr_queue(dev_info, rx_queue_id);
1930 sintrq = rxa_shared_intr(dev_info, rx_queue_id);
1931 rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 0);
1932 rx_adapter->num_rx_polled -= pollq;
1933 dev_info->nb_rx_poll -= pollq;
1934 rx_adapter->num_rx_intr -= intrq;
1935 dev_info->nb_rx_intr -= intrq;
1936 dev_info->nb_shared_intr -= intrq && sintrq;
1937 if (rx_adapter->use_queue_event_buf) {
1938 struct eth_event_enqueue_buffer *event_buf =
1939 dev_info->rx_queue[rx_queue_id].event_buf;
1940 rte_free(event_buf->events);
1941 rte_free(event_buf);
1942 dev_info->rx_queue[rx_queue_id].event_buf = NULL;
1947 rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
1948 struct eth_device_info *dev_info, int32_t rx_queue_id,
1949 const struct rte_event_eth_rx_adapter_queue_conf *conf)
1951 struct eth_rx_queue_info *queue_info;
1952 const struct rte_event *ev = &conf->ev;
1956 struct rte_event *qi_ev;
1957 struct eth_event_enqueue_buffer *new_rx_buf = NULL;
1958 uint16_t eth_dev_id = dev_info->dev->data->port_id;
1961 if (rx_queue_id == -1) {
1962 uint16_t nb_rx_queues;
1965 nb_rx_queues = dev_info->dev->data->nb_rx_queues;
1966 for (i = 0; i < nb_rx_queues; i++) {
1967 ret = rxa_add_queue(rx_adapter, dev_info, i, conf);
1974 pollq = rxa_polled_queue(dev_info, rx_queue_id);
1975 intrq = rxa_intr_queue(dev_info, rx_queue_id);
1976 sintrq = rxa_shared_intr(dev_info, rx_queue_id);
1978 queue_info = &dev_info->rx_queue[rx_queue_id];
1979 queue_info->wt = conf->servicing_weight;
1981 qi_ev = (struct rte_event *)&queue_info->event;
1982 qi_ev->event = ev->event;
1983 qi_ev->op = RTE_EVENT_OP_NEW;
1984 qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
1985 qi_ev->sub_event_type = 0;
1987 if (conf->rx_queue_flags &
1988 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {
1989 queue_info->flow_id_mask = ~0;
1993 if (conf->rx_queue_flags &
1994 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
1995 queue_info->ena_vector = 1;
1996 qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
1997 rxa_set_vector_data(queue_info, conf->vector_sz,
1998 conf->vector_timeout_ns, conf->vector_mp,
1999 rx_queue_id, dev_info->dev->data->port_id);
2000 rx_adapter->ena_vector = 1;
2001 rx_adapter->vector_tmo_ticks =
2002 rx_adapter->vector_tmo_ticks ?
2003 RTE_MIN(queue_info->vector_data
2004 .vector_timeout_ticks >>
2006 rx_adapter->vector_tmo_ticks) :
2007 queue_info->vector_data.vector_timeout_ticks >>
2011 rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
2012 if (rxa_polled_queue(dev_info, rx_queue_id)) {
2013 rx_adapter->num_rx_polled += !pollq;
2014 dev_info->nb_rx_poll += !pollq;
2015 rx_adapter->num_rx_intr -= intrq;
2016 dev_info->nb_rx_intr -= intrq;
2017 dev_info->nb_shared_intr -= intrq && sintrq;
2020 if (rxa_intr_queue(dev_info, rx_queue_id)) {
2021 rx_adapter->num_rx_polled -= pollq;
2022 dev_info->nb_rx_poll -= pollq;
2023 rx_adapter->num_rx_intr += !intrq;
2024 dev_info->nb_rx_intr += !intrq;
2025 dev_info->nb_shared_intr += !intrq && sintrq;
2026 if (dev_info->nb_shared_intr == 1) {
2027 if (dev_info->multi_intr_cap)
2028 dev_info->next_q_idx =
2029 RTE_MAX_RXTX_INTR_VEC_ID - 1;
2031 dev_info->next_q_idx = 0;
2035 if (!rx_adapter->use_queue_event_buf)
2038 new_rx_buf = rte_zmalloc_socket("rx_buffer_meta",
2039 sizeof(*new_rx_buf), 0,
2040 rte_eth_dev_socket_id(eth_dev_id));
2041 if (new_rx_buf == NULL) {
2042 RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for "
2043 "dev_id: %d queue_id: %d",
2044 eth_dev_id, rx_queue_id);
2048 new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE);
2049 new_rx_buf->events_size += (2 * BATCH_SIZE);
2050 new_rx_buf->events = rte_zmalloc_socket("rx_buffer",
2051 sizeof(struct rte_event) *
2052 new_rx_buf->events_size, 0,
2053 rte_eth_dev_socket_id(eth_dev_id));
2054 if (new_rx_buf->events == NULL) {
2055 rte_free(new_rx_buf);
2056 RTE_EDEV_LOG_ERR("Failed to allocate event buffer for "
2057 "dev_id: %d queue_id: %d",
2058 eth_dev_id, rx_queue_id);
2062 queue_info->event_buf = new_rx_buf;
2068 rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
2070 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2072 struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
2073 struct rte_event_eth_rx_adapter_queue_conf temp_conf;
2075 struct eth_rx_poll_entry *rx_poll;
2076 struct eth_rx_queue_info *rx_queue;
2078 uint16_t nb_rx_queues;
2079 uint32_t nb_rx_poll, nb_wrr;
2080 uint32_t nb_rx_intr;
2084 if (queue_conf->servicing_weight == 0) {
2085 struct rte_eth_dev_data *data = dev_info->dev->data;
2087 temp_conf = *queue_conf;
2088 if (!data->dev_conf.intr_conf.rxq) {
2089 /* If Rx interrupts are disabled set wt = 1 */
2090 temp_conf.servicing_weight = 1;
2092 queue_conf = &temp_conf;
2094 if (queue_conf->servicing_weight == 0 &&
2095 rx_adapter->use_queue_event_buf) {
2097 RTE_EDEV_LOG_ERR("Use of queue level event buffer "
2098 "not supported for interrupt queues "
2099 "dev_id: %d queue_id: %d",
2100 eth_dev_id, rx_queue_id);
2105 nb_rx_queues = dev_info->dev->data->nb_rx_queues;
2106 rx_queue = dev_info->rx_queue;
2107 wt = queue_conf->servicing_weight;
2109 if (dev_info->rx_queue == NULL) {
2110 dev_info->rx_queue =
2111 rte_zmalloc_socket(rx_adapter->mem_name,
2113 sizeof(struct eth_rx_queue_info), 0,
2114 rx_adapter->socket_id);
2115 if (dev_info->rx_queue == NULL)
2121 rxa_calc_nb_post_add(rx_adapter, dev_info, rx_queue_id,
2122 queue_conf->servicing_weight,
2123 &nb_rx_poll, &nb_rx_intr, &nb_wrr);
2125 if (dev_info->dev->intr_handle)
2126 dev_info->multi_intr_cap =
2127 rte_intr_cap_multiple(dev_info->dev->intr_handle);
2129 ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
2132 goto err_free_rxqueue;
2135 num_intr_vec = rxa_nb_intr_vect(dev_info, rx_queue_id, 1);
2137 ret = rxa_intr_ring_check_avail(rx_adapter, num_intr_vec);
2139 goto err_free_rxqueue;
2141 ret = rxa_add_intr_queue(rx_adapter, dev_info, rx_queue_id);
2143 goto err_free_rxqueue;
2147 if (rx_adapter->num_rx_intr > nb_rx_intr) {
2148 num_intr_vec = rxa_nb_intr_vect(dev_info,
2150 /* interrupt based queues are being converted to
2151 * poll mode queues, delete the interrupt configuration
2154 ret = rxa_del_intr_queue(rx_adapter,
2155 dev_info, rx_queue_id);
2157 goto err_free_rxqueue;
2161 if (nb_rx_intr == 0) {
2162 ret = rxa_free_intr_resources(rx_adapter);
2164 goto err_free_rxqueue;
2170 if (rx_queue_id == -1) {
2171 for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
2172 dev_info->intr_queue[i] = i;
2174 if (!rxa_intr_queue(dev_info, rx_queue_id))
2175 dev_info->intr_queue[nb_rx_intr - 1] =
2182 ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
2184 goto err_free_rxqueue;
2185 rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
2187 rte_free(rx_adapter->eth_rx_poll);
2188 rte_free(rx_adapter->wrr_sched);
2190 rx_adapter->eth_rx_poll = rx_poll;
2191 rx_adapter->wrr_sched = rx_wrr;
2192 rx_adapter->wrr_len = nb_wrr;
2193 rx_adapter->num_intr_vec += num_intr_vec;
2197 if (rx_queue == NULL) {
2198 rte_free(dev_info->rx_queue);
2199 dev_info->rx_queue = NULL;
2209 rxa_ctrl(uint8_t id, int start)
2211 struct event_eth_rx_adapter *rx_adapter;
2212 struct rte_eventdev *dev;
2213 struct eth_device_info *dev_info;
2215 int use_service = 0;
2218 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2219 rx_adapter = rxa_id_to_adapter(id);
2220 if (rx_adapter == NULL)
2223 dev = &rte_eventdevs[rx_adapter->eventdev_id];
2225 RTE_ETH_FOREACH_DEV(i) {
2226 dev_info = &rx_adapter->eth_devices[i];
2227 /* if start check for num dev queues */
2228 if (start && !dev_info->nb_dev_queues)
2230 /* if stop check if dev has been started */
2231 if (stop && !dev_info->dev_rx_started)
2233 use_service |= !dev_info->internal_event_port;
2234 dev_info->dev_rx_started = start;
2235 if (dev_info->internal_event_port == 0)
2237 start ? (*dev->dev_ops->eth_rx_adapter_start)(dev,
2238 &rte_eth_devices[i]) :
2239 (*dev->dev_ops->eth_rx_adapter_stop)(dev,
2240 &rte_eth_devices[i]);
2244 rte_spinlock_lock(&rx_adapter->rx_lock);
2245 rx_adapter->rxa_started = start;
2246 rte_service_runstate_set(rx_adapter->service_id, start);
2247 rte_spinlock_unlock(&rx_adapter->rx_lock);
2254 rxa_create(uint8_t id, uint8_t dev_id,
2255 struct rte_event_eth_rx_adapter_params *rxa_params,
2256 rte_event_eth_rx_adapter_conf_cb conf_cb,
2259 struct event_eth_rx_adapter *rx_adapter;
2260 struct eth_event_enqueue_buffer *buf;
2261 struct rte_event *events;
2265 char mem_name[ETH_RX_ADAPTER_SERVICE_NAME_LEN];
2266 const uint8_t default_rss_key[] = {
2267 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
2268 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
2269 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
2270 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
2271 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
2274 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2275 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
2277 if (conf_cb == NULL)
2280 if (event_eth_rx_adapter == NULL) {
2281 ret = rte_event_eth_rx_adapter_init();
2286 rx_adapter = rxa_id_to_adapter(id);
2287 if (rx_adapter != NULL) {
2288 RTE_EDEV_LOG_ERR("Eth Rx adapter exists id = %" PRIu8, id);
2292 socket_id = rte_event_dev_socket_id(dev_id);
2293 snprintf(mem_name, ETH_RX_ADAPTER_MEM_NAME_LEN,
2294 "rte_event_eth_rx_adapter_%d",
2297 rx_adapter = rte_zmalloc_socket(mem_name, sizeof(*rx_adapter),
2298 RTE_CACHE_LINE_SIZE, socket_id);
2299 if (rx_adapter == NULL) {
2300 RTE_EDEV_LOG_ERR("failed to get mem for rx adapter");
2304 rx_adapter->eventdev_id = dev_id;
2305 rx_adapter->socket_id = socket_id;
2306 rx_adapter->conf_cb = conf_cb;
2307 rx_adapter->conf_arg = conf_arg;
2308 rx_adapter->id = id;
2309 TAILQ_INIT(&rx_adapter->vector_list);
2310 strcpy(rx_adapter->mem_name, mem_name);
2311 rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
2313 sizeof(struct eth_device_info), 0,
2315 rte_convert_rss_key((const uint32_t *)default_rss_key,
2316 (uint32_t *)rx_adapter->rss_key_be,
2317 RTE_DIM(default_rss_key));
2319 if (rx_adapter->eth_devices == NULL) {
2320 RTE_EDEV_LOG_ERR("failed to get mem for eth devices\n");
2321 rte_free(rx_adapter);
2325 rte_spinlock_init(&rx_adapter->rx_lock);
2327 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
2328 rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
2330 /* Rx adapter event buffer allocation */
2331 rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf;
2333 if (!rx_adapter->use_queue_event_buf) {
2334 buf = &rx_adapter->event_enqueue_buffer;
2335 buf->events_size = rxa_params->event_buf_size;
2337 events = rte_zmalloc_socket(rx_adapter->mem_name,
2338 buf->events_size * sizeof(*events),
2340 if (events == NULL) {
2341 RTE_EDEV_LOG_ERR("Failed to allocate memory "
2342 "for adapter event buffer");
2343 rte_free(rx_adapter->eth_devices);
2344 rte_free(rx_adapter);
2348 rx_adapter->event_enqueue_buffer.events = events;
2351 event_eth_rx_adapter[id] = rx_adapter;
2353 if (conf_cb == rxa_default_conf_cb)
2354 rx_adapter->default_cb_arg = 1;
2356 if (rte_mbuf_dyn_rx_timestamp_register(
2357 &event_eth_rx_timestamp_dynfield_offset,
2358 &event_eth_rx_timestamp_dynflag) != 0) {
2359 RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf\n");
2363 rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb,
2369 rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
2370 rte_event_eth_rx_adapter_conf_cb conf_cb,
2373 struct rte_event_eth_rx_adapter_params rxa_params = {0};
2375 /* use default values for adapter params */
2376 rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE;
2377 rxa_params.use_queue_event_buf = false;
2379 return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg);
2383 rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id,
2384 struct rte_event_port_conf *port_config,
2385 struct rte_event_eth_rx_adapter_params *rxa_params)
2387 struct rte_event_port_conf *pc;
2389 struct rte_event_eth_rx_adapter_params temp_params = {0};
2391 if (port_config == NULL)
2394 if (rxa_params == NULL) {
2395 /* use default values if rxa_params is NULL */
2396 rxa_params = &temp_params;
2397 rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE;
2398 rxa_params->use_queue_event_buf = false;
2399 } else if ((!rxa_params->use_queue_event_buf &&
2400 rxa_params->event_buf_size == 0) ||
2401 (rxa_params->use_queue_event_buf &&
2402 rxa_params->event_buf_size != 0)) {
2403 RTE_EDEV_LOG_ERR("Invalid adapter params\n");
2405 } else if (!rxa_params->use_queue_event_buf) {
2406 /* adjust event buffer size with BATCH_SIZE used for fetching
2407 * packets from NIC rx queues to get full buffer utilization
2408 * and prevent unnecessary rollovers.
2411 rxa_params->event_buf_size =
2412 RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE);
2413 rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE);
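/* For example, if BATCH_SIZE were 32, a requested size of 100 events
 * would be aligned up to 128 and then grown to 192.
 */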
2416 pc = rte_malloc(NULL, sizeof(*pc), 0);
2422 ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc);
2430 rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
2431 struct rte_event_port_conf *port_config)
2433 struct rte_event_port_conf *pc;
2436 if (port_config == NULL)
2439 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2441 pc = rte_malloc(NULL, sizeof(*pc), 0);
2446 ret = rte_event_eth_rx_adapter_create_ext(id, dev_id,
2447 rxa_default_conf_cb,
2455 rte_event_eth_rx_adapter_free(uint8_t id)
2457 struct event_eth_rx_adapter *rx_adapter;
2459 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2461 rx_adapter = rxa_id_to_adapter(id);
2462 if (rx_adapter == NULL)
2465 if (rx_adapter->nb_queues) {
2466 RTE_EDEV_LOG_ERR("%" PRIu16 " Rx queues not deleted",
2467 rx_adapter->nb_queues);
2471 if (rx_adapter->default_cb_arg)
2472 rte_free(rx_adapter->conf_arg);
2473 rte_free(rx_adapter->eth_devices);
2474 if (!rx_adapter->use_queue_event_buf)
2475 rte_free(rx_adapter->event_enqueue_buffer.events);
2476 rte_free(rx_adapter);
2477 event_eth_rx_adapter[id] = NULL;
2479 rte_eventdev_trace_eth_rx_adapter_free(id);
rte_event_eth_rx_adapter_queue_add(uint8_t id,
uint16_t eth_dev_id,
int32_t rx_queue_id,
const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
struct event_eth_rx_adapter *rx_adapter;
struct rte_eventdev *dev;
struct eth_device_info *dev_info;
struct rte_event_eth_rx_adapter_vector_limits limits;
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
rx_adapter = rxa_id_to_adapter(id);
if ((rx_adapter == NULL) || (queue_conf == NULL))
dev = &rte_eventdevs[rx_adapter->eventdev_id];
ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
"eth port %" PRIu16, id, eth_dev_id);
if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) == 0
&& (queue_conf->rx_queue_flags &
RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
" eth port: %" PRIu16 " adapter id: %" PRIu8,
if (queue_conf->rx_queue_flags &
RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0) {
RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
" eth port: %" PRIu16
" adapter id: %" PRIu8,
ret = rte_event_eth_rx_adapter_vector_limits_get(
rx_adapter->eventdev_id, eth_dev_id, &limits);
RTE_EDEV_LOG_ERR("Failed to get event device vector limits,"
" eth port: %" PRIu16
" adapter id: %" PRIu8,
if (queue_conf->vector_sz < limits.min_sz ||
queue_conf->vector_sz > limits.max_sz ||
queue_conf->vector_timeout_ns < limits.min_timeout_ns ||
queue_conf->vector_timeout_ns > limits.max_timeout_ns ||
queue_conf->vector_mp == NULL) {
RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
" eth port: %" PRIu16
" adapter id: %" PRIu8,
if (queue_conf->vector_mp->elt_size <
(sizeof(struct rte_event_vector) +
(sizeof(uintptr_t) * queue_conf->vector_sz))) {
RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
" eth port: %" PRIu16
" adapter id: %" PRIu8,
if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
(rx_queue_id != -1)) {
RTE_EDEV_LOG_ERR("Rx queues can only be connected to single "
"event queue, eth port: %" PRIu16 " adapter id: %"
PRIu8, eth_dev_id, id);
if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
(uint16_t)rx_queue_id);
if ((rx_adapter->use_queue_event_buf &&
queue_conf->event_buf_size == 0) ||
(!rx_adapter->use_queue_event_buf &&
queue_conf->event_buf_size != 0)) {
RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue");
dev_info = &rx_adapter->eth_devices[eth_dev_id];
if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_add,
if (dev_info->rx_queue == NULL) {
dev_info->rx_queue =
rte_zmalloc_socket(rx_adapter->mem_name,
dev_info->dev->data->nb_rx_queues *
sizeof(struct eth_rx_queue_info), 0,
rx_adapter->socket_id);
if (dev_info->rx_queue == NULL)
ret = (*dev->dev_ops->eth_rx_adapter_queue_add)(dev,
&rte_eth_devices[eth_dev_id],
rx_queue_id, queue_conf);
dev_info->internal_event_port = 1;
rxa_update_queue(rx_adapter,
&rx_adapter->eth_devices[eth_dev_id],
rte_spinlock_lock(&rx_adapter->rx_lock);
dev_info->internal_event_port = 0;
ret = rxa_init_service(rx_adapter, id);
uint32_t service_id = rx_adapter->service_id;
ret = rxa_sw_add(rx_adapter, eth_dev_id, rx_queue_id,
rte_service_component_runstate_set(service_id,
rxa_sw_adapter_queue_count(rx_adapter));
rte_spinlock_unlock(&rx_adapter->rx_lock);
rte_eventdev_trace_eth_rx_adapter_queue_add(id, eth_dev_id,
rx_queue_id, queue_conf, ret);
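/*
 * Illustrative sketch, not part of the library: adding Rx queues with event
 * vectorization enabled. The helper name, the adapter/port/queue ids and the
 * choice of RTE_SCHED_TYPE_ATOMIC are assumptions for the example. vector_sz
 * and vector_timeout_ns are taken from the limits reported by
 * rte_event_eth_rx_adapter_vector_limits_get(), and vec_pool is assumed to
 * have an element size of at least sizeof(struct rte_event_vector) +
 * vector_sz * sizeof(uintptr_t), matching the checks above.
 */
static int
rxa_example_queue_add_vector(uint8_t rxa_id, uint8_t evdev_id,
			     uint16_t eth_port, uint8_t ev_queue,
			     struct rte_mempool *vec_pool)
{
	struct rte_event_eth_rx_adapter_vector_limits limits;
	struct rte_event_eth_rx_adapter_queue_conf qconf;
	int ret;

	ret = rte_event_eth_rx_adapter_vector_limits_get(evdev_id, eth_port,
							 &limits);
	if (ret)
		return ret;

	memset(&qconf, 0, sizeof(qconf));
	qconf.rx_queue_flags = RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
	qconf.servicing_weight = 1;
	qconf.ev.queue_id = ev_queue;
	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	qconf.vector_sz = limits.min_sz;
	qconf.vector_timeout_ns = limits.min_timeout_ns;
	qconf.vector_mp = vec_pool;

	/* rx_queue_id of -1 connects all Rx queues of eth_port */
	return rte_event_eth_rx_adapter_queue_add(rxa_id, eth_port, -1,
						  &qconf);
}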
rxa_sw_vector_limits(struct rte_event_eth_rx_adapter_vector_limits *limits)
limits->max_sz = MAX_VECTOR_SIZE;
limits->min_sz = MIN_VECTOR_SIZE;
limits->max_timeout_ns = MAX_VECTOR_NS;
limits->min_timeout_ns = MIN_VECTOR_NS;
rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
int32_t rx_queue_id)
struct rte_eventdev *dev;
struct event_eth_rx_adapter *rx_adapter;
struct eth_device_info *dev_info;
uint32_t nb_rx_poll = 0;
uint32_t nb_wrr = 0;
uint32_t nb_rx_intr;
struct eth_rx_poll_entry *rx_poll = NULL;
uint32_t *rx_wrr = NULL;
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter == NULL)
dev = &rte_eventdevs[rx_adapter->eventdev_id];
ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
(uint16_t)rx_queue_id);
dev_info = &rx_adapter->eth_devices[eth_dev_id];
if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_del,
ret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev,
&rte_eth_devices[eth_dev_id],
rxa_update_queue(rx_adapter,
&rx_adapter->eth_devices[eth_dev_id],
if (dev_info->nb_dev_queues == 0) {
rte_free(dev_info->rx_queue);
dev_info->rx_queue = NULL;
rxa_calc_nb_post_del(rx_adapter, dev_info, rx_queue_id,
&nb_rx_poll, &nb_rx_intr, &nb_wrr);
ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
rte_spinlock_lock(&rx_adapter->rx_lock);
if (rx_adapter->num_rx_intr > nb_rx_intr) {
num_intr_vec = rxa_nb_intr_vect(dev_info,
ret = rxa_del_intr_queue(rx_adapter, dev_info,
if (nb_rx_intr == 0) {
ret = rxa_free_intr_resources(rx_adapter);
rxa_sw_del(rx_adapter, dev_info, rx_queue_id);
rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
rte_free(rx_adapter->eth_rx_poll);
rte_free(rx_adapter->wrr_sched);
if (nb_rx_intr == 0) {
rte_free(dev_info->intr_queue);
dev_info->intr_queue = NULL;
rx_adapter->eth_rx_poll = rx_poll;
rx_adapter->wrr_sched = rx_wrr;
rx_adapter->wrr_len = nb_wrr;
* reset next poll start position (wrr_pos) to avoid buffer
* overrun when wrr_len is reduced in case of queue delete
rx_adapter->wrr_pos = 0;
rx_adapter->num_intr_vec += num_intr_vec;
if (dev_info->nb_dev_queues == 0) {
rte_free(dev_info->rx_queue);
dev_info->rx_queue = NULL;
rte_spinlock_unlock(&rx_adapter->rx_lock);
rte_service_component_runstate_set(rx_adapter->service_id,
rxa_sw_adapter_queue_count(rx_adapter));
rte_eventdev_trace_eth_rx_adapter_queue_del(id, eth_dev_id,
rte_event_eth_rx_adapter_vector_limits_get(
uint8_t dev_id, uint16_t eth_port_id,
struct rte_event_eth_rx_adapter_vector_limits *limits)
struct rte_eventdev *dev;
RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
dev = &rte_eventdevs[dev_id];
ret = rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &cap);
RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
"eth port %" PRIu16,
dev_id, eth_port_id);
if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
RTE_FUNC_PTR_OR_ERR_RET(
*dev->dev_ops->eth_rx_adapter_vector_limits_get,
ret = dev->dev_ops->eth_rx_adapter_vector_limits_get(
dev, &rte_eth_devices[eth_port_id], limits);
ret = rxa_sw_vector_limits(limits);
rte_event_eth_rx_adapter_start(uint8_t id)
rte_eventdev_trace_eth_rx_adapter_start(id);
return rxa_ctrl(id, 1);
rte_event_eth_rx_adapter_stop(uint8_t id)
rte_eventdev_trace_eth_rx_adapter_stop(id);
return rxa_ctrl(id, 0);
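/*
 * Illustrative sketch, not part of the library: a typical teardown order.
 * All Rx queues must be deleted before the adapter can be freed (see the
 * nb_queues check in rte_event_eth_rx_adapter_free()). The helper name and
 * the ids are assumptions for the example.
 */
static int
rxa_example_teardown(uint8_t rxa_id, uint16_t eth_port)
{
	int ret;

	ret = rte_event_eth_rx_adapter_stop(rxa_id);
	if (ret)
		return ret;

	/* rx_queue_id of -1 deletes every Rx queue of eth_port that was added */
	ret = rte_event_eth_rx_adapter_queue_del(rxa_id, eth_port, -1);
	if (ret)
		return ret;

	return rte_event_eth_rx_adapter_free(rxa_id);
}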
rte_event_eth_rx_adapter_stats_get(uint8_t id,
struct rte_event_eth_rx_adapter_stats *stats)
struct event_eth_rx_adapter *rx_adapter;
struct eth_event_enqueue_buffer *buf;
struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
struct rte_event_eth_rx_adapter_stats dev_stats;
struct rte_eventdev *dev;
struct eth_device_info *dev_info;
if (rxa_memzone_lookup())
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter == NULL || stats == NULL)
dev = &rte_eventdevs[rx_adapter->eventdev_id];
memset(stats, 0, sizeof(*stats));
RTE_ETH_FOREACH_DEV(i) {
dev_info = &rx_adapter->eth_devices[i];
if (dev_info->internal_event_port == 0 ||
dev->dev_ops->eth_rx_adapter_stats_get == NULL)
ret = (*dev->dev_ops->eth_rx_adapter_stats_get)(dev,
&rte_eth_devices[i],
dev_stats_sum.rx_packets += dev_stats.rx_packets;
dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
if (rx_adapter->service_inited)
*stats = rx_adapter->stats;
stats->rx_packets += dev_stats_sum.rx_packets;
stats->rx_enq_count += dev_stats_sum.rx_enq_count;
if (!rx_adapter->use_queue_event_buf) {
buf = &rx_adapter->event_enqueue_buffer;
stats->rx_event_buf_count = buf->count;
stats->rx_event_buf_size = buf->events_size;
stats->rx_event_buf_count = 0;
stats->rx_event_buf_size = 0;
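/*
 * Illustrative sketch, not part of the library: reading adapter-level
 * statistics from an application. Only stats fields that are also exported
 * through the telemetry handler below are used; the helper name and out
 * parameters are assumptions for the example.
 */
static int
rxa_example_read_stats(uint8_t rxa_id, uint64_t *rx_packets,
		       uint64_t *rx_dropped)
{
	struct rte_event_eth_rx_adapter_stats stats;
	int ret;

	ret = rte_event_eth_rx_adapter_stats_get(rxa_id, &stats);
	if (ret)
		return ret;

	*rx_packets = stats.rx_packets;
	*rx_dropped = stats.rx_dropped;
	return 0;
}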
rte_event_eth_rx_adapter_stats_reset(uint8_t id)
struct event_eth_rx_adapter *rx_adapter;
struct rte_eventdev *dev;
struct eth_device_info *dev_info;
if (rxa_memzone_lookup())
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter == NULL)
dev = &rte_eventdevs[rx_adapter->eventdev_id];
RTE_ETH_FOREACH_DEV(i) {
dev_info = &rx_adapter->eth_devices[i];
if (dev_info->internal_event_port == 0 ||
dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
(*dev->dev_ops->eth_rx_adapter_stats_reset)(dev,
&rte_eth_devices[i]);
memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
struct event_eth_rx_adapter *rx_adapter;
if (rxa_memzone_lookup())
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter == NULL || service_id == NULL)
if (rx_adapter->service_inited)
*service_id = rx_adapter->service_id;
return rx_adapter->service_inited ? 0 : -ESRCH;
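/*
 * Illustrative sketch, not part of the library: when the adapter runs as a
 * SW service, the application must map the returned service id to a service
 * lcore and start it. The helper name and lcore id are assumptions for the
 * example, and the lcore is assumed to have been added as a service core
 * beforehand (rte_service_lcore_add()); the rte_service_* calls come from
 * rte_service.h.
 */
static int
rxa_example_launch_service(uint8_t rxa_id, uint32_t service_lcore)
{
	uint32_t service_id;
	int ret;

	ret = rte_event_eth_rx_adapter_service_id_get(rxa_id, &service_id);
	if (ret == -ESRCH)
		return 0; /* adapter does not use a service function */
	if (ret)
		return ret;

	ret = rte_service_runstate_set(service_id, 1);
	if (ret)
		return ret;

	ret = rte_service_map_lcore_set(service_id, service_lcore, 1);
	if (ret)
		return ret;

	return rte_service_lcore_start(service_lcore);
}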
rte_event_eth_rx_adapter_cb_register(uint8_t id,
uint16_t eth_dev_id,
rte_event_eth_rx_adapter_cb_fn cb_fn,
struct event_eth_rx_adapter *rx_adapter;
struct eth_device_info *dev_info;
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter == NULL)
dev_info = &rx_adapter->eth_devices[eth_dev_id];
if (dev_info->rx_queue == NULL)
ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
"eth port %" PRIu16, id, eth_dev_id);
if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
RTE_EDEV_LOG_ERR("Rx callback not supported for eth port %"
PRIu16, eth_dev_id);
rte_spinlock_lock(&rx_adapter->rx_lock);
dev_info->cb_fn = cb_fn;
dev_info->cb_arg = cb_arg;
rte_spinlock_unlock(&rx_adapter->rx_lock);
rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
uint16_t eth_dev_id,
uint16_t rx_queue_id,
struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
struct rte_eventdev *dev;
struct event_eth_rx_adapter *rx_adapter;
struct eth_device_info *dev_info;
struct eth_rx_queue_info *queue_info;
struct rte_event *qi_ev;
if (rxa_memzone_lookup())
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
if (queue_conf == NULL) {
RTE_EDEV_LOG_ERR("Rx queue conf struct cannot be NULL");
rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter == NULL)
dev_info = &rx_adapter->eth_devices[eth_dev_id];
if (dev_info->rx_queue == NULL ||
!dev_info->rx_queue[rx_queue_id].queue_enabled) {
RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
queue_info = &dev_info->rx_queue[rx_queue_id];
qi_ev = (struct rte_event *)&queue_info->event;
memset(queue_conf, 0, sizeof(*queue_conf));
queue_conf->rx_queue_flags = 0;
if (queue_info->flow_id_mask != 0)
queue_conf->rx_queue_flags |=
RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
queue_conf->servicing_weight = queue_info->wt;
memcpy(&queue_conf->ev, qi_ev, sizeof(*qi_ev));
dev = &rte_eventdevs[rx_adapter->eventdev_id];
if (dev->dev_ops->eth_rx_adapter_queue_conf_get != NULL) {
ret = (*dev->dev_ops->eth_rx_adapter_queue_conf_get)(dev,
&rte_eth_devices[eth_dev_id],
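/*
 * Illustrative sketch, not part of the library: reading back the
 * configuration of a previously added Rx queue, e.g. to inspect its
 * servicing weight. The helper name and the ids are assumptions for the
 * example.
 */
static int
rxa_example_queue_weight(uint8_t rxa_id, uint16_t eth_port,
			 uint16_t rx_queue, uint16_t *weight)
{
	struct rte_event_eth_rx_adapter_queue_conf qconf;
	int ret;

	ret = rte_event_eth_rx_adapter_queue_conf_get(rxa_id, eth_port,
						      rx_queue, &qconf);
	if (ret)
		return ret;

	*weight = qconf.servicing_weight;
	return 0;
}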
#define RXA_ADD_DICT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
handle_rxa_stats(const char *cmd __rte_unused,
struct rte_tel_data *d)
uint8_t rx_adapter_id;
struct rte_event_eth_rx_adapter_stats rx_adptr_stats;
if (params == NULL || strlen(params) == 0 || !isdigit(*params))
/* Get Rx adapter ID from parameter string */
rx_adapter_id = atoi(params);
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
/* Get Rx adapter stats */
if (rte_event_eth_rx_adapter_stats_get(rx_adapter_id,
3058 RTE_EDEV_LOG_ERR("Failed to get Rx adapter stats\n");
rte_tel_data_start_dict(d);
rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
RXA_ADD_DICT(rx_adptr_stats, rx_packets);
RXA_ADD_DICT(rx_adptr_stats, rx_poll_count);
RXA_ADD_DICT(rx_adptr_stats, rx_dropped);
RXA_ADD_DICT(rx_adptr_stats, rx_enq_retry);
RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_count);
RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_size);
RXA_ADD_DICT(rx_adptr_stats, rx_enq_count);
RXA_ADD_DICT(rx_adptr_stats, rx_enq_start_ts);
RXA_ADD_DICT(rx_adptr_stats, rx_enq_block_cycles);
RXA_ADD_DICT(rx_adptr_stats, rx_enq_end_ts);
RXA_ADD_DICT(rx_adptr_stats, rx_intr_packets);
handle_rxa_stats_reset(const char *cmd __rte_unused,
struct rte_tel_data *d __rte_unused)
uint8_t rx_adapter_id;
if (params == NULL || strlen(params) == 0 || !isdigit(*params))
/* Get Rx adapter ID from parameter string */
rx_adapter_id = atoi(params);
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
/* Reset Rx adapter stats */
if (rte_event_eth_rx_adapter_stats_reset(rx_adapter_id)) {
3095 RTE_EDEV_LOG_ERR("Failed to reset Rx adapter stats\n");
handle_rxa_get_queue_conf(const char *cmd __rte_unused,
struct rte_tel_data *d)
uint8_t rx_adapter_id;
uint16_t rx_queue_id;
char *token, *l_params;
struct rte_event_eth_rx_adapter_queue_conf queue_conf;
if (params == NULL || strlen(params) == 0 || !isdigit(*params))
/* Get Rx adapter ID from parameter string */
l_params = strdup(params);
token = strtok(l_params, ",");
rx_adapter_id = strtoul(token, NULL, 10);
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
token = strtok(NULL, ",");
if (token == NULL || strlen(token) == 0 || !isdigit(*token))
/* Get Ethernet device ID from parameter string */
eth_dev_id = strtoul(token, NULL, 10);
RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
token = strtok(NULL, ",");
if (token == NULL || strlen(token) == 0 || !isdigit(*token))
/* Get Rx queue ID from parameter string */
rx_queue_id = strtoul(token, NULL, 10);
if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
token = strtok(NULL, "\0");
RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
3144 " telemetry command, igrnoring");
if (rte_event_eth_rx_adapter_queue_conf_get(rx_adapter_id, eth_dev_id,
rx_queue_id, &queue_conf)) {
RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue config");
rte_tel_data_start_dict(d);
rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id);
rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id);
RXA_ADD_DICT(queue_conf, rx_queue_flags);
RXA_ADD_DICT(queue_conf, servicing_weight);
RXA_ADD_DICT(queue_conf.ev, queue_id);
RXA_ADD_DICT(queue_conf.ev, sched_type);
RXA_ADD_DICT(queue_conf.ev, priority);
RXA_ADD_DICT(queue_conf.ev, flow_id);
RTE_INIT(rxa_init_telemetry)
rte_telemetry_register_cmd("/eventdev/rxa_stats",
"Returns Rx adapter stats. Parameter: rxa_id");
rte_telemetry_register_cmd("/eventdev/rxa_stats_reset",
handle_rxa_stats_reset,
"Reset Rx adapter stats. Parameter: rxa_id");
rte_telemetry_register_cmd("/eventdev/rxa_queue_conf",
handle_rxa_get_queue_conf,
"Returns Rx queue config. Parameter: rxa_id, dev_id, queue_id");