/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation.
 * All rights reserved.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#if defined(LINUX)
#include <sys/epoll.h>
#endif

#include <rte_cycles.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <ethdev_driver.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_service_component.h>
#include <rte_thash.h>
#include <rte_interrupts.h>
#include <rte_mbuf_dyn.h>
#include <rte_telemetry.h>

#include "rte_eventdev.h"
#include "eventdev_pmd.h"
#include "eventdev_trace.h"
#include "rte_event_eth_rx_adapter.h"
#define BATCH_SIZE		32
#define BLOCK_CNT_THRESHOLD	10
#define ETH_EVENT_BUFFER_SIZE	(6*BATCH_SIZE)
#define MAX_VECTOR_SIZE		1024
#define MIN_VECTOR_SIZE		4
#define MAX_VECTOR_NS		1E9
#define MIN_VECTOR_NS		1E5

#define ETH_RX_ADAPTER_SERVICE_NAME_LEN	32
#define ETH_RX_ADAPTER_MEM_NAME_LEN	32

#define RSS_KEY_SIZE	40
/* value written to intr thread pipe to signal thread exit */
#define ETH_BRIDGE_INTR_THREAD_EXIT	1
/* Sentinel value to detect initialized file handle */
#define INIT_FD		-1

#define RXA_ADAPTER_ARRAY "rte_event_eth_rx_adapter_array"
/*
 * Used to store port and queue ID of interrupting Rx queue
 */
union queue_data {
	RTE_STD_C11
	void *ptr;
	struct {
		uint16_t port;
		uint16_t queue;
	};
};

/*
 * There is an instance of this struct per polled Rx queue added to the
 * adapter
 */
struct eth_rx_poll_entry {
	/* Eth port to poll */
	uint16_t eth_dev_id;
	/* Eth rx queue to poll */
	uint16_t eth_rx_qid;
};

struct eth_rx_vector_data {
	TAILQ_ENTRY(eth_rx_vector_data) next;
	uint16_t port;
	uint16_t queue;
	uint16_t max_vector_count;
	uint64_t event;
	uint64_t ts;
	uint64_t vector_timeout_ticks;
	struct rte_mempool *vector_pool;
	struct rte_event_vector *vector_ev;
} __rte_cache_aligned;

TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
/* Instance per adapter */
struct eth_event_enqueue_buffer {
	/* Count of events in this buffer */
	uint16_t count;
	/* Array of events in this buffer */
	struct rte_event *events;
	/* size of event buffer */
	uint16_t events_size;
	/* Event enqueue happens from head */
	uint16_t head;
	/* New packets from rte_eth_rx_burst are enqueued from tail */
	uint16_t tail;
	/* last element in the buffer before rollover */
	uint16_t last;
	/* last element mask to acquire the buffer wrap point */
	uint16_t last_mask;
};
struct event_eth_rx_adapter {
	/* RSS key */
	uint8_t rss_key_be[RSS_KEY_SIZE];
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Event port identifier */
	uint8_t event_port_id;
	/* Flag indicating per rxq event buffer */
	bool use_queue_event_buf;
	/* Per ethernet device structure */
	struct eth_device_info *eth_devices;
	/* Lock to serialize config updates with service function */
	rte_spinlock_t rx_lock;
	/* Max mbufs processed in any service function invocation */
	uint32_t max_nb_rx;
	/* Receive queues that need to be polled */
	struct eth_rx_poll_entry *eth_rx_poll;
	/* Size of the eth_rx_poll array */
	uint16_t num_rx_polled;
	/* Weighted round robin schedule */
	uint32_t *wrr_sched;
	/* wrr_sched[] size */
	uint32_t wrr_len;
	/* Next entry in wrr[] to begin polling */
	uint32_t wrr_pos;
	/* Event burst buffer */
	struct eth_event_enqueue_buffer event_enqueue_buffer;
	/* Vector enable flag */
	uint8_t ena_vector;
	/* Timestamp of previous vector expiry list traversal */
	uint64_t prev_expiry_ts;
	/* Minimum ticks to wait before traversing expiry list */
	uint64_t vector_tmo_ticks;
	/* vector list */
	struct eth_rx_vector_data_list vector_list;
	/* Per adapter stats */
	struct rte_event_eth_rx_adapter_stats stats;
	/* Block count, counts up to BLOCK_CNT_THRESHOLD */
	uint16_t enq_block_count;
	/* Block start ts */
	uint64_t rx_enq_block_start_ts;
	/* epoll fd used to wait for Rx interrupts */
	int epd;
	/* Number of interrupt driven queues */
	uint32_t num_rx_intr;
	/* Used to send <dev id, queue id> of interrupting Rx queues from
	 * the interrupt thread to the Rx thread
	 */
	struct rte_ring *intr_ring;
	/* Rx Queue data (dev id, queue id) for the last non-empty
	 * queue polled
	 */
	union queue_data qd;
	/* queue_data is valid */
	int qd_valid;
	/* Interrupt ring lock, synchronizes Rx thread
	 * and interrupt thread
	 */
	rte_spinlock_t intr_ring_lock;
	/* event array passed to rte_epoll_wait */
	struct rte_epoll_event *epoll_events;
	/* Count of interrupt vectors in use */
	uint32_t num_intr_vec;
	/* Thread blocked on Rx interrupts */
	pthread_t rx_intr_thread;
	/* Configuration callback for rte_service configuration */
	rte_event_eth_rx_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	void *conf_arg;
	/* Set if default_cb is being used */
	int default_cb_arg;
	/* Service initialization state */
	uint8_t service_inited;
	/* Total count of Rx queues in adapter */
	uint32_t nb_queues;
	/* Memory allocation name */
	char mem_name[ETH_RX_ADAPTER_MEM_NAME_LEN];
	/* Socket identifier cached from eventdev */
	int socket_id;
	/* Per adapter EAL service */
	uint32_t service_id;
	/* Adapter started flag */
	uint8_t rxa_started;
	/* Adapter ID */
	uint8_t id;
} __rte_cache_aligned;
/* Per eth device */
struct eth_device_info {
	struct rte_eth_dev *dev;
	struct eth_rx_queue_info *rx_queue;
	/* Rx callback */
	rte_event_eth_rx_adapter_cb_fn cb_fn;
	/* Rx callback argument */
	void *cb_arg;
	/* Set if ethdev->eventdev packet transfer uses a
	 * hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set if the adapter is processing rx queues for
	 * this eth device and packet processing has been
	 * started, allows for the code to know if the PMD
	 * rx_adapter_stop callback needs to be invoked
	 */
	uint8_t dev_rx_started;
	/* Number of queues added for this device */
	uint16_t nb_dev_queues;
	/* Number of poll based queues
	 * If nb_rx_poll > 0, the start callback will
	 * be invoked if not already invoked
	 */
	uint16_t nb_rx_poll;
	/* Number of interrupt based queues
	 * If nb_rx_intr > 0, the start callback will
	 * be invoked if not already invoked.
	 */
	uint16_t nb_rx_intr;
	/* Number of queues that use the shared interrupt */
	uint16_t nb_shared_intr;
	/* sum(wrr(q)) for all queues within the device
	 * useful when deleting all device queues
	 */
	uint32_t wrr_len;
	/* Intr based queue index to start polling from, this is used
	 * if the number of shared interrupts is non-zero
	 */
	uint16_t next_q_idx;
	/* Intr based queue indices */
	uint16_t *intr_queue;
	/* device generates per Rx queue interrupt for queue index
	 * for queue indices < RTE_MAX_RXTX_INTR_VEC_ID - 1
	 */
	int multi_intr_cap;
	/* shared interrupt enabled */
	int shared_intr_enabled;
};
/* Per Rx queue */
struct eth_rx_queue_info {
	int queue_enabled;	/* True if added */
	int intr_enabled;
	uint8_t ena_vector;
	uint16_t wt;		/* Polling weight */
	uint32_t flow_id_mask;	/* Set to ~0 if app provides flow id else 0 */
	uint64_t event;
	struct eth_rx_vector_data vector_data;
	struct eth_event_enqueue_buffer *event_buf;
	/* use adapter stats struct for queue level stats,
	 * as same stats need to be updated for adapter and queue
	 */
	struct rte_event_eth_rx_adapter_stats *stats;
};
static struct event_eth_rx_adapter **event_eth_rx_adapter;

/* Enable dynamic timestamp field in mbuf */
static uint64_t event_eth_rx_timestamp_dynflag;
static int event_eth_rx_timestamp_dynfield_offset = -1;

static inline rte_mbuf_timestamp_t *
rxa_timestamp_dynfield(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf,
		event_eth_rx_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
}
static inline int
rxa_validate_id(uint8_t id)
{
	return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
}

static inline struct eth_event_enqueue_buffer *
rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
		  uint16_t rx_queue_id,
		  struct rte_event_eth_rx_adapter_stats **stats)
{
	if (rx_adapter->use_queue_event_buf) {
		struct eth_device_info *dev_info =
			&rx_adapter->eth_devices[eth_dev_id];

		*stats = dev_info->rx_queue[rx_queue_id].stats;
		return dev_info->rx_queue[rx_queue_id].event_buf;
	} else {
		*stats = &rx_adapter->stats;
		return &rx_adapter->event_enqueue_buffer;
	}
}
#define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
	if (!rxa_validate_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
		return retval; \
	} \
} while (0)

static inline int
rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
{
	return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
}

/* Greatest common divisor */
static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
{
	uint16_t r = a % b;

	return r ? rxa_gcd_u16(b, r) : b;
}
/* Returns the next queue in the polling sequence
 *
 * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
 */
static int
rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw,
	     struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
	     uint16_t gcd, int prev)
{
	int i = prev;
	uint16_t w;

	while (1) {
		uint16_t q;
		uint16_t d;

		i = (i + 1) % n;
		if (i == 0) {
			*cw = *cw - gcd;
			if (*cw <= 0)
				*cw = max_wt;
		}

		q = eth_rx_poll[i].eth_rx_qid;
		d = eth_rx_poll[i].eth_dev_id;
		w = rx_adapter->eth_devices[d].rx_queue[q].wt;

		if ((int)w >= *cw)
			return i;
	}
}
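
/* Illustrative note (not part of the original source): with three polled
 * queues A, B and C carrying servicing weights 4, 2 and 1, max_wt = 4 and
 * gcd = 1, repeated calls to rxa_wrr_next() yield the interleaved sequence
 * A A A B A B C, i.e. each queue appears in proportion to its weight
 * within one period of length sum(weights) = 7.
 */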
static inline int
rxa_shared_intr(struct eth_device_info *dev_info,
	int rx_queue_id)
{
	int multi_intr_cap;

	if (dev_info->dev->intr_handle == NULL)
		return 0;

	multi_intr_cap = rte_intr_cap_multiple(dev_info->dev->intr_handle);
	return !multi_intr_cap ||
		rx_queue_id >= RTE_MAX_RXTX_INTR_VEC_ID - 1;
}
static inline int
rxa_intr_queue(struct eth_device_info *dev_info,
	int rx_queue_id)
{
	struct eth_rx_queue_info *queue_info;

	queue_info = &dev_info->rx_queue[rx_queue_id];
	return dev_info->rx_queue &&
		!dev_info->internal_event_port &&
		queue_info->queue_enabled && queue_info->wt == 0;
}

static inline int
rxa_polled_queue(struct eth_device_info *dev_info,
	int rx_queue_id)
{
	struct eth_rx_queue_info *queue_info;

	queue_info = &dev_info->rx_queue[rx_queue_id];
	return !dev_info->internal_event_port &&
		dev_info->rx_queue &&
		queue_info->queue_enabled && queue_info->wt != 0;
}
/* Calculate change in number of vectors after Rx queue ID is added/deleted */
static int
rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
{
	uint16_t i;
	int n, s;
	uint16_t nbq;

	nbq = dev_info->dev->data->nb_rx_queues;
	n = 0; /* non shared count */
	s = 0; /* shared count */

	if (rx_queue_id == -1) {
		for (i = 0; i < nbq; i++) {
			if (!rxa_shared_intr(dev_info, i))
				n += add ? !rxa_intr_queue(dev_info, i) :
					rxa_intr_queue(dev_info, i);
			else
				s += add ? !rxa_intr_queue(dev_info, i) :
					rxa_intr_queue(dev_info, i);
		}

		if (s > 0) {
			if ((add && dev_info->nb_shared_intr == 0) ||
				(!add && dev_info->nb_shared_intr))
				n += 1;
		}
	} else {
		if (!rxa_shared_intr(dev_info, rx_queue_id))
			n = add ? !rxa_intr_queue(dev_info, rx_queue_id) :
				rxa_intr_queue(dev_info, rx_queue_id);
		else
			n = add ? !dev_info->nb_shared_intr :
				dev_info->nb_shared_intr == 1;
	}

	return add ? n : -n;
}
/* Calculate nb_rx_intr after deleting interrupt mode rx queues
 */
static void
rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter,
			struct eth_device_info *dev_info, int rx_queue_id,
			uint32_t *nb_rx_intr)
{
	uint32_t intr_diff;

	if (rx_queue_id == -1)
		intr_diff = dev_info->nb_rx_intr;
	else
		intr_diff = rxa_intr_queue(dev_info, rx_queue_id);

	*nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
}
/* Calculate nb_rx_* after adding interrupt mode rx queues, newly added
 * interrupt queues could currently be poll mode Rx queues
 */
static void
rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter,
			struct eth_device_info *dev_info, int rx_queue_id,
			uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
			uint32_t *nb_wrr)
{
	uint32_t intr_diff;
	uint32_t poll_diff;
	uint32_t wrr_len_diff;

	if (rx_queue_id == -1) {
		intr_diff = dev_info->dev->data->nb_rx_queues -
						dev_info->nb_rx_intr;
		poll_diff = dev_info->nb_rx_poll;
		wrr_len_diff = dev_info->wrr_len;
	} else {
		intr_diff = !rxa_intr_queue(dev_info, rx_queue_id);
		poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
		wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
					0;
	}

	*nb_rx_intr = rx_adapter->num_rx_intr + intr_diff;
	*nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
	*nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
}
/* Calculate size of the eth_rx_poll and wrr_sched arrays
 * after deleting poll mode rx queues
 */
static void
rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter,
			struct eth_device_info *dev_info, int rx_queue_id,
			uint32_t *nb_rx_poll, uint32_t *nb_wrr)
{
	uint32_t poll_diff;
	uint32_t wrr_len_diff;

	if (rx_queue_id == -1) {
		poll_diff = dev_info->nb_rx_poll;
		wrr_len_diff = dev_info->wrr_len;
	} else {
		poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
		wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
					0;
	}

	*nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
	*nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
}
/* Calculate nb_rx_* after adding poll mode rx queues
 */
static void
rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter,
			struct eth_device_info *dev_info, int rx_queue_id,
			uint16_t wt, uint32_t *nb_rx_poll,
			uint32_t *nb_rx_intr, uint32_t *nb_wrr)
{
	uint32_t intr_diff;
	uint32_t poll_diff;
	uint32_t wrr_len_diff;

	if (rx_queue_id == -1) {
		intr_diff = dev_info->nb_rx_intr;
		poll_diff = dev_info->dev->data->nb_rx_queues -
						dev_info->nb_rx_poll;
		wrr_len_diff = wt*dev_info->dev->data->nb_rx_queues
				- dev_info->wrr_len;
	} else {
		intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
		poll_diff = !rxa_polled_queue(dev_info, rx_queue_id);
		wrr_len_diff = rxa_polled_queue(dev_info, rx_queue_id) ?
				wt - dev_info->rx_queue[rx_queue_id].wt :
				wt;
	}

	*nb_rx_poll = rx_adapter->num_rx_polled + poll_diff;
	*nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
	*nb_wrr = rx_adapter->wrr_len + wrr_len_diff;
}
/* Calculate nb_rx_* after adding rx_queue_id */
static void
rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter,
		struct eth_device_info *dev_info, int rx_queue_id,
		uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
		uint32_t *nb_wrr)
{
	if (wt != 0)
		rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
					wt, nb_rx_poll, nb_rx_intr, nb_wrr);
	else
		rxa_calc_nb_post_add_intr(rx_adapter, dev_info, rx_queue_id,
					nb_rx_poll, nb_rx_intr, nb_wrr);
}

/* Calculate nb_rx_* after deleting rx_queue_id */
static void
rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter,
		struct eth_device_info *dev_info, int rx_queue_id,
		uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
		uint32_t *nb_wrr)
{
	rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
				nb_wrr);
	rxa_calc_nb_post_intr_del(rx_adapter, dev_info, rx_queue_id,
				nb_rx_intr);
}
/*
 * Allocate the rx_poll array
 */
static struct eth_rx_poll_entry *
rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled)
{
	size_t len;

	len = RTE_ALIGN(num_rx_polled * sizeof(*rx_adapter->eth_rx_poll),
			RTE_CACHE_LINE_SIZE);
	return rte_zmalloc_socket(rx_adapter->mem_name,
				len,
				RTE_CACHE_LINE_SIZE,
				rx_adapter->socket_id);
}

/*
 * Allocate the WRR array
 */
static uint32_t *
rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr)
{
	size_t len;

	len = RTE_ALIGN(nb_wrr * sizeof(*rx_adapter->wrr_sched),
			RTE_CACHE_LINE_SIZE);
	return rte_zmalloc_socket(rx_adapter->mem_name,
				len,
				RTE_CACHE_LINE_SIZE,
				rx_adapter->socket_id);
}

static int
rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll,
		uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll,
		uint32_t **wrr_sched)
{
	if (nb_poll == 0) {
		*rx_poll = NULL;
		*wrr_sched = NULL;
		return 0;
	}

	*rx_poll = rxa_alloc_poll(rx_adapter, nb_poll);
	if (*rx_poll == NULL) {
		*wrr_sched = NULL;
		return -ENOMEM;
	}

	*wrr_sched = rxa_alloc_wrr(rx_adapter, nb_wrr);
	if (*wrr_sched == NULL) {
		rte_free(*rx_poll);
		return -ENOMEM;
	}

	return 0;
}
/* Precalculate WRR polling sequence for all queues in rx_adapter */
static void
rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter,
		struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr)
{
	uint16_t d;
	uint16_t q;
	unsigned int i;

	/* Initialize variables for calculation of wrr schedule */
	uint16_t max_wrr_pos = 0;
	unsigned int poll_q = 0;
	uint16_t max_wt = 0;
	uint16_t gcd = 0;

	/* Generate array of all queues to poll, the size of this
	 * array is poll_q
	 */
	RTE_ETH_FOREACH_DEV(d) {
		uint16_t nb_rx_queues;
		struct eth_device_info *dev_info =
				&rx_adapter->eth_devices[d];
		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
		if (dev_info->rx_queue == NULL)
			continue;
		if (dev_info->internal_event_port)
			continue;
		dev_info->wrr_len = 0;
		for (q = 0; q < nb_rx_queues; q++) {
			struct eth_rx_queue_info *queue_info =
				&dev_info->rx_queue[q];
			uint16_t wt;

			if (!rxa_polled_queue(dev_info, q))
				continue;
			wt = queue_info->wt;
			rx_poll[poll_q].eth_dev_id = d;
			rx_poll[poll_q].eth_rx_qid = q;
			max_wrr_pos += wt;
			dev_info->wrr_len += wt;
			max_wt = RTE_MAX(max_wt, wt);
			gcd = (gcd) ? rxa_gcd_u16(gcd, wt) : wt;
			poll_q++;
		}
	}

	/* Generate polling sequence based on weights */
	int prev = -1;
	int cw = -1;
	for (i = 0; i < max_wrr_pos; i++) {
		rx_wrr[i] = rxa_wrr_next(rx_adapter, poll_q, &cw,
				rx_poll, max_wt, gcd, prev);
		prev = rx_wrr[i];
	}
}
static inline void
rxa_mtoip(struct rte_mbuf *m, struct rte_ipv4_hdr **ipv4_hdr,
	struct rte_ipv6_hdr **ipv6_hdr)
{
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	struct rte_vlan_hdr *vlan_hdr;

	*ipv4_hdr = NULL;
	*ipv6_hdr = NULL;

	switch (eth_hdr->ether_type) {
	case RTE_BE16(RTE_ETHER_TYPE_IPV4):
		*ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
		break;

	case RTE_BE16(RTE_ETHER_TYPE_IPV6):
		*ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
		break;

	case RTE_BE16(RTE_ETHER_TYPE_VLAN):
		vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
		switch (vlan_hdr->eth_proto) {
		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
			*ipv4_hdr = (struct rte_ipv4_hdr *)(vlan_hdr + 1);
			break;
		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
			*ipv6_hdr = (struct rte_ipv6_hdr *)(vlan_hdr + 1);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}
}
/* Calculate RSS hash for IPv4/6 */
static inline uint32_t
rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
{
	uint32_t input_len;
	void *tuple;
	struct rte_ipv4_tuple ipv4_tuple;
	struct rte_ipv6_tuple ipv6_tuple;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;

	rxa_mtoip(m, &ipv4_hdr, &ipv6_hdr);

	if (ipv4_hdr) {
		ipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);
		ipv4_tuple.dst_addr = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
		tuple = &ipv4_tuple;
		input_len = RTE_THASH_V4_L3_LEN;
	} else if (ipv6_hdr) {
		rte_thash_load_v6_addrs(ipv6_hdr,
					(union rte_thash_tuple *)&ipv6_tuple);
		tuple = &ipv6_tuple;
		input_len = RTE_THASH_V6_L3_LEN;
	} else
		return 0;

	return rte_softrss_be(tuple, input_len, rss_key_be);
}
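
/* Note: rxa_do_softrss() is only used when the mbuf carries neither a
 * NIC-computed RSS hash (RTE_MBUF_F_RX_RSS_HASH unset) nor an
 * application-provided flow id; see the do_rss computation in
 * rxa_buffer_mbufs() below. rte_softrss_be() expects the RSS key in
 * byte-swapped form, which is why rxa_create() converts the default key
 * with rte_convert_rss_key() at adapter creation time.
 */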
static inline int
rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter)
{
	return !!rx_adapter->enq_block_count;
}

static inline void
rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter)
{
	if (rx_adapter->rx_enq_block_start_ts)
		return;

	rx_adapter->enq_block_count++;
	if (rx_adapter->enq_block_count < BLOCK_CNT_THRESHOLD)
		return;

	rx_adapter->rx_enq_block_start_ts = rte_get_tsc_cycles();
}

static inline void
rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
		    struct rte_event_eth_rx_adapter_stats *stats)
{
	if (unlikely(!stats->rx_enq_start_ts))
		stats->rx_enq_start_ts = rte_get_tsc_cycles();

	if (likely(!rxa_enq_blocked(rx_adapter)))
		return;

	rx_adapter->enq_block_count = 0;
	if (rx_adapter->rx_enq_block_start_ts) {
		stats->rx_enq_end_ts = rte_get_tsc_cycles();
		stats->rx_enq_block_cycles += stats->rx_enq_end_ts -
		    rx_adapter->rx_enq_block_start_ts;
		rx_adapter->rx_enq_block_start_ts = 0;
	}
}
/* Enqueue buffered events to event device */
static inline uint16_t
rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter,
		       struct eth_event_enqueue_buffer *buf,
		       struct rte_event_eth_rx_adapter_stats *stats)
{
	uint16_t count = buf->last ? buf->last - buf->head : buf->count;

	if (!count)
		return 0;

	uint16_t n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
					rx_adapter->event_port_id,
					&buf->events[buf->head],
					count);
	if (n != count)
		stats->rx_enq_retry++;

	buf->head += n;

	if (buf->last && n == count) {
		uint16_t n1;

		n1 = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
					rx_adapter->event_port_id,
					&buf->events[0],
					buf->tail);

		if (n1 != buf->tail)
			stats->rx_enq_retry++;

		buf->last = 0;
		buf->head = n1;
		buf->last_mask = 0;
		n += n1;
	}

	n ? rxa_enq_block_end_ts(rx_adapter, stats) :
		rxa_enq_block_start_ts(rx_adapter);

	buf->count -= n;
	stats->rx_enq_count += n;

	return n;
}
static inline void
rxa_init_vector(struct event_eth_rx_adapter *rx_adapter,
		struct eth_rx_vector_data *vec)
{
	vec->vector_ev->nb_elem = 0;
	vec->vector_ev->port = vec->port;
	vec->vector_ev->queue = vec->queue;
	vec->vector_ev->attr_valid = true;
	TAILQ_INSERT_TAIL(&rx_adapter->vector_list, vec, next);
}
static inline uint16_t
rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
			struct eth_rx_queue_info *queue_info,
			struct eth_event_enqueue_buffer *buf,
			struct rte_mbuf **mbufs, uint16_t num)
{
	struct rte_event *ev = &buf->events[buf->count];
	struct eth_rx_vector_data *vec;
	uint16_t filled, space, sz;

	filled = 0;
	vec = &queue_info->vector_data;

	if (vec->vector_ev == NULL) {
		if (rte_mempool_get(vec->vector_pool,
				    (void **)&vec->vector_ev) < 0) {
			rte_pktmbuf_free_bulk(mbufs, num);
			return 0;
		}
		rxa_init_vector(rx_adapter, vec);
	}

	while (num) {
		if (vec->vector_ev->nb_elem == vec->max_vector_count) {
			/* Event ready. */
			ev->event = vec->event;
			ev->vec = vec->vector_ev;
			ev++;
			filled++;
			vec->vector_ev = NULL;
			TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
			if (rte_mempool_get(vec->vector_pool,
					    (void **)&vec->vector_ev) < 0) {
				rte_pktmbuf_free_bulk(mbufs, num);
				return 0;
			}
			rxa_init_vector(rx_adapter, vec);
		}

		space = vec->max_vector_count - vec->vector_ev->nb_elem;
		sz = num > space ? space : num;
		memcpy(vec->vector_ev->mbufs + vec->vector_ev->nb_elem, mbufs,
		       sizeof(void *) * sz);
		vec->vector_ev->nb_elem += sz;
		num -= sz;
		mbufs += sz;
		vec->ts = rte_rdtsc();
	}

	if (vec->vector_ev->nb_elem == vec->max_vector_count) {
		ev->event = vec->event;
		ev->vec = vec->vector_ev;
		ev++;
		filled++;
		vec->vector_ev = NULL;
		TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
	}

	return filled;
}
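
/* Sketch of the aggregation flow above: mbufs are appended to the current
 * rte_event_vector until it reaches max_vector_count, at which point a
 * completed vector event is written to the enqueue buffer and a fresh
 * vector is taken from vector_pool. Partially filled vectors stay on
 * rx_adapter->vector_list and are flushed by the service function once
 * vector_timeout_ticks elapse (see rxa_vector_expire()).
 */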
static inline void
rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
		 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num,
		 struct eth_event_enqueue_buffer *buf,
		 struct rte_event_eth_rx_adapter_stats *stats)
{
	uint32_t i;
	struct eth_device_info *dev_info =
					&rx_adapter->eth_devices[eth_dev_id];
	struct eth_rx_queue_info *eth_rx_queue_info =
					&dev_info->rx_queue[rx_queue_id];
	uint16_t new_tail = buf->tail;
	uint64_t event = eth_rx_queue_info->event;
	uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
	struct rte_mbuf *m = mbufs[0];
	uint32_t rss_mask;
	uint32_t rss;
	int do_rss;
	uint16_t nb_cb;
	uint16_t dropped;
	uint64_t ts, ts_mask;

	if (!eth_rx_queue_info->ena_vector) {
		ts = m->ol_flags & event_eth_rx_timestamp_dynflag ?
						0 : rte_get_tsc_cycles();

		/* 0xffff ffff ffff ffff if RTE_MBUF_F_RX_TIMESTAMP is set,
		 * otherwise 0
		 */
		ts_mask = (uint64_t)(!(m->ol_flags &
				       event_eth_rx_timestamp_dynflag)) - 1ULL;

		/* 0xffff ffff if RTE_MBUF_F_RX_RSS_HASH is set, otherwise 0 */
		rss_mask = ~(((m->ol_flags & RTE_MBUF_F_RX_RSS_HASH) != 0) - 1);
		do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
		for (i = 0; i < num; i++) {
			struct rte_event *ev;

			m = mbufs[i];
			*rxa_timestamp_dynfield(m) = ts |
					(*rxa_timestamp_dynfield(m) & ts_mask);

			ev = &buf->events[new_tail];

			rss = do_rss ? rxa_do_softrss(m, rx_adapter->rss_key_be)
				     : m->hash.rss;
			ev->event = event;
			ev->flow_id = (rss & ~flow_id_mask) |
				      (ev->flow_id & flow_id_mask);
			ev->mbuf = m;
			new_tail++;
		}
	} else {
		num = rxa_create_event_vector(rx_adapter, eth_rx_queue_info,
					      buf, mbufs, num);
	}

	if (num && dev_info->cb_fn) {

		dropped = 0;
		nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id,
				       buf->last |
				       (buf->events_size & ~buf->last_mask),
				       buf->count >= BATCH_SIZE ?
						buf->count - BATCH_SIZE : 0,
				       &buf->events[buf->tail],
				       num,
				       dev_info->cb_arg,
				       &dropped);
		if (unlikely(nb_cb > num))
			RTE_EDEV_LOG_ERR("Rx CB returned %d (> %d) events",
				nb_cb, num);
		else
			num = nb_cb;
		if (dropped)
			stats->rx_dropped += dropped;
	}

	buf->count += num;
	buf->tail += num;
}
static inline bool
rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf)
{
	uint32_t nb_req = buf->tail + BATCH_SIZE;

	if (!buf->last) {
		if (nb_req <= buf->events_size)
			return true;

		if (buf->head >= BATCH_SIZE) {
			buf->last_mask = ~0;
			buf->last = buf->tail;
			buf->tail = 0;
			return true;
		}
	}

	return nb_req <= buf->head;
}
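
/* Example of the rollover logic above (illustrative values, not from the
 * original source): with events_size = 192, tail = 180 and head = 64,
 * nb_req = 180 + BATCH_SIZE(32) = 212 exceeds events_size, but head >=
 * BATCH_SIZE, so last is set to 180, tail wraps to 0 and subsequent
 * batches fill events[0..head) while rxa_flush_event_buffer() drains the
 * range [head..last) before clearing last.
 */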
/* Enqueue packets from <port, q> to event buffer */
static inline uint32_t
rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
	   int *rxq_empty, struct eth_event_enqueue_buffer *buf,
	   struct rte_event_eth_rx_adapter_stats *stats)
{
	struct rte_mbuf *mbufs[BATCH_SIZE];
	uint16_t n;
	uint32_t nb_rx = 0;

	if (rxq_empty)
		*rxq_empty = 0;
	/* Don't do a batch dequeue from the rx queue if there isn't
	 * enough space in the enqueue buffer.
	 */
	while (rxa_pkt_buf_available(buf)) {
		if (buf->count >= BATCH_SIZE)
			rxa_flush_event_buffer(rx_adapter, buf, stats);

		stats->rx_poll_count++;
		n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
		if (unlikely(!n)) {
			if (rxq_empty)
				*rxq_empty = 1;
			break;
		}
		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf,
				 stats);
		nb_rx += n;
		if (rx_count + nb_rx > max_rx)
			break;
	}

	if (buf->count >= BATCH_SIZE)
		rxa_flush_event_buffer(rx_adapter, buf, stats);

	stats->rx_packets += nb_rx;

	return nb_rx;
}
static inline void
rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data)
{
	uint16_t port_id;
	uint16_t queue;
	int err;
	union queue_data qd;
	struct eth_device_info *dev_info;
	struct eth_rx_queue_info *queue_info;
	int *intr_enabled;

	qd.ptr = data;
	port_id = qd.port;
	queue = qd.queue;

	dev_info = &rx_adapter->eth_devices[port_id];
	queue_info = &dev_info->rx_queue[queue];
	rte_spinlock_lock(&rx_adapter->intr_ring_lock);
	if (rxa_shared_intr(dev_info, queue))
		intr_enabled = &dev_info->shared_intr_enabled;
	else
		intr_enabled = &queue_info->intr_enabled;

	if (*intr_enabled) {
		*intr_enabled = 0;
		err = rte_ring_enqueue(rx_adapter->intr_ring, data);
		/* Entry should always be available.
		 * The ring size equals the maximum number of interrupt
		 * vectors supported (an interrupt vector is shared in
		 * case of shared interrupts)
		 */
		if (err)
			RTE_EDEV_LOG_ERR("Failed to enqueue interrupt"
				" to ring: %s", strerror(-err));
		else
			rte_eth_dev_rx_intr_disable(port_id, queue);
	}
	rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
}
static int
rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter,
			uint32_t num_intr_vec)
{
	if (rx_adapter->num_intr_vec + num_intr_vec >
				RTE_EVENT_ETH_INTR_RING_SIZE) {
		RTE_EDEV_LOG_ERR("Exceeded intr ring slots current"
		" %d needed %d limit %d", rx_adapter->num_intr_vec,
		num_intr_vec, RTE_EVENT_ETH_INTR_RING_SIZE);
		return -ENOSPC;
	}

	return 0;
}
/* Delete entries for (dev, queue) from the interrupt ring */
static void
rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter,
			  struct eth_device_info *dev_info,
			  uint16_t rx_queue_id)
{
	int i, n;
	union queue_data qd;

	rte_spinlock_lock(&rx_adapter->intr_ring_lock);

	n = rte_ring_count(rx_adapter->intr_ring);
	for (i = 0; i < n; i++) {
		rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
		if (!rxa_shared_intr(dev_info, rx_queue_id)) {
			if (qd.port == dev_info->dev->data->port_id &&
				qd.queue == rx_queue_id)
				continue;
		} else {
			if (qd.port == dev_info->dev->data->port_id)
				continue;
		}
		rte_ring_enqueue(rx_adapter->intr_ring, qd.ptr);
	}

	rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
}
/* pthread callback handling interrupt mode receive queues
 * After receiving an Rx interrupt, it enqueues the port id and queue id of the
 * interrupting queue to the adapter's ring buffer for interrupt events.
 * These events are picked up by rxa_intr_ring_dequeue() which is invoked from
 * the adapter service function.
 */
static void *
rxa_intr_thread(void *arg)
{
	struct event_eth_rx_adapter *rx_adapter = arg;
	struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
	int n, i;

	while (1) {
		n = rte_epoll_wait(rx_adapter->epd, epoll_events,
				RTE_EVENT_ETH_INTR_RING_SIZE, -1);
		if (unlikely(n < 0))
			RTE_EDEV_LOG_ERR("rte_epoll_wait returned error %d",
					n);
		for (i = 0; i < n; i++) {
			rxa_intr_ring_enqueue(rx_adapter,
					epoll_events[i].epdata.data);
		}
	}

	return NULL;
}
/* Dequeue <port, q> from interrupt ring and enqueue received
 * mbufs to eventdev
 */
static inline void
rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
{
	uint32_t n;
	uint32_t nb_rx = 0;
	int rxq_empty;
	struct eth_event_enqueue_buffer *buf;
	struct rte_event_eth_rx_adapter_stats *stats;
	rte_spinlock_t *ring_lock;
	uint8_t max_done = 0;

	if (rx_adapter->num_rx_intr == 0)
		return;

	if (rte_ring_count(rx_adapter->intr_ring) == 0
		&& !rx_adapter->qd_valid)
		return;

	buf = &rx_adapter->event_enqueue_buffer;
	stats = &rx_adapter->stats;
	ring_lock = &rx_adapter->intr_ring_lock;

	if (buf->count >= BATCH_SIZE)
		rxa_flush_event_buffer(rx_adapter, buf, stats);

	while (rxa_pkt_buf_available(buf)) {
		struct eth_device_info *dev_info;
		uint16_t port;
		uint16_t queue;
		union queue_data qd = rx_adapter->qd;
		int err;

		if (!rx_adapter->qd_valid) {
			struct eth_rx_queue_info *queue_info;

			rte_spinlock_lock(ring_lock);
			err = rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
			if (err) {
				rte_spinlock_unlock(ring_lock);
				break;
			}

			port = qd.port;
			queue = qd.queue;
			rx_adapter->qd = qd;
			rx_adapter->qd_valid = 1;
			dev_info = &rx_adapter->eth_devices[port];
			if (rxa_shared_intr(dev_info, queue))
				dev_info->shared_intr_enabled = 1;
			else {
				queue_info = &dev_info->rx_queue[queue];
				queue_info->intr_enabled = 1;
			}
			rte_eth_dev_rx_intr_enable(port, queue);
			rte_spinlock_unlock(ring_lock);
		} else {
			port = qd.port;
			queue = qd.queue;

			dev_info = &rx_adapter->eth_devices[port];
		}

		if (rxa_shared_intr(dev_info, queue)) {
			uint16_t i;
			uint16_t nb_queues;

			nb_queues = dev_info->dev->data->nb_rx_queues;
			n = 0;
			for (i = dev_info->next_q_idx; i < nb_queues; i++) {
				uint8_t enq_buffer_full;

				if (!rxa_intr_queue(dev_info, i))
					continue;
				n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
					rx_adapter->max_nb_rx,
					&rxq_empty, buf, stats);
				nb_rx += n;

				enq_buffer_full = !rxq_empty && n == 0;
				max_done = nb_rx > rx_adapter->max_nb_rx;

				if (enq_buffer_full || max_done) {
					dev_info->next_q_idx = i;
					goto done;
				}
			}

			rx_adapter->qd_valid = 0;

			/* Reinitialize for next interrupt */
			dev_info->next_q_idx = dev_info->multi_intr_cap ?
						RTE_MAX_RXTX_INTR_VEC_ID - 1 :
						0;
		} else {
			n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
				rx_adapter->max_nb_rx,
				&rxq_empty, buf, stats);
			rx_adapter->qd_valid = !rxq_empty;
			nb_rx += n;
			if (nb_rx > rx_adapter->max_nb_rx)
				break;
		}
	}

done:
	rx_adapter->stats.rx_intr_packets += nb_rx;
}
/*
 * Polls receive queues added to the event adapter and enqueues received
 * packets to the event device.
 *
 * The receive code enqueues initially to a temporary buffer, the
 * temporary buffer is drained anytime it holds >= BATCH_SIZE packets.
 *
 * If there isn't space available in the temporary buffer, packets from the
 * Rx queue aren't dequeued from the eth device, this back pressures the
 * eth device, in virtual device environments this back pressure is relayed to
 * the hypervisor's switching layer where adjustments can be made to deal with
 * it.
 */
static inline void
rxa_poll(struct event_eth_rx_adapter *rx_adapter)
{
	uint32_t num_queue;
	uint32_t nb_rx = 0;
	struct eth_event_enqueue_buffer *buf = NULL;
	struct rte_event_eth_rx_adapter_stats *stats = NULL;
	uint32_t wrr_pos;
	uint32_t max_nb_rx;

	wrr_pos = rx_adapter->wrr_pos;
	max_nb_rx = rx_adapter->max_nb_rx;

	/* Iterate through a WRR sequence */
	for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
		unsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];
		uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
		uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;

		buf = rxa_event_buf_get(rx_adapter, d, qid, &stats);

		/* Don't do a batch dequeue from the rx queue if there isn't
		 * enough space in the enqueue buffer.
		 */
		if (buf->count >= BATCH_SIZE)
			rxa_flush_event_buffer(rx_adapter, buf, stats);
		if (!rxa_pkt_buf_available(buf)) {
			if (rx_adapter->use_queue_event_buf)
				goto poll_next_entry;
			else {
				rx_adapter->wrr_pos = wrr_pos;
				return;
			}
		}

		nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
				NULL, buf, stats);
		if (nb_rx > max_nb_rx) {
			rx_adapter->wrr_pos =
				    (wrr_pos + 1) % rx_adapter->wrr_len;
			break;
		}

poll_next_entry:
		if (++wrr_pos == rx_adapter->wrr_len)
			wrr_pos = 0;
	}
}
static void
rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
{
	struct event_eth_rx_adapter *rx_adapter = arg;
	struct eth_event_enqueue_buffer *buf = NULL;
	struct rte_event_eth_rx_adapter_stats *stats = NULL;
	struct rte_event *ev;

	buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue, &stats);

	if (buf->count)
		rxa_flush_event_buffer(rx_adapter, buf, stats);

	if (vec->vector_ev->nb_elem == 0)
		return;

	ev = &buf->events[buf->count];

	/* Event ready. */
	ev->event = vec->event;
	ev->vec = vec->vector_ev;
	buf->count++;

	vec->vector_ev = NULL;
	vec->ts = 0;
}
static int
rxa_service_func(void *args)
{
	struct event_eth_rx_adapter *rx_adapter = args;

	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
		return 0;
	if (!rx_adapter->rxa_started) {
		rte_spinlock_unlock(&rx_adapter->rx_lock);
		return 0;
	}

	if (rx_adapter->ena_vector) {
		if ((rte_rdtsc() - rx_adapter->prev_expiry_ts) >=
		    rx_adapter->vector_tmo_ticks) {
			struct eth_rx_vector_data *vec;

			TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {
				uint64_t elapsed_time = rte_rdtsc() - vec->ts;

				if (elapsed_time >= vec->vector_timeout_ticks) {
					rxa_vector_expire(vec, rx_adapter);
					TAILQ_REMOVE(&rx_adapter->vector_list,
						     vec, next);
				}
			}
			rx_adapter->prev_expiry_ts = rte_rdtsc();
		}
	}

	rxa_intr_ring_dequeue(rx_adapter);
	rxa_poll(rx_adapter);

	rte_spinlock_unlock(&rx_adapter->rx_lock);

	return 0;
}
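
/* Usage note (hedged, not part of the original source): when the adapter
 * relies on this service function (i.e. the eventdev has no internal Rx
 * port), the application is expected to map the service to a service
 * lcore, for example:
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_eth_rx_adapter_service_id_get(id, &service_id) == 0) {
 *		rte_service_map_lcore_set(service_id, lcore_id, 1);
 *		rte_service_lcore_start(lcore_id);
 *	}
 *
 * rte_event_eth_rx_adapter_service_id_get() is part of this library's
 * public API; the lcore must previously have been registered with
 * rte_service_lcore_add().
 */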
static int
rte_event_eth_rx_adapter_init(void)
{
	const char *name = RXA_ADAPTER_ARRAY;
	const struct rte_memzone *mz;
	unsigned int sz;

	sz = sizeof(*event_eth_rx_adapter) *
	    RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
						 RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
					PRId32, rte_errno);
			return -rte_errno;
		}
	}

	event_eth_rx_adapter = mz->addr;
	return 0;
}

static int
rxa_memzone_lookup(void)
{
	const struct rte_memzone *mz;

	if (event_eth_rx_adapter == NULL) {
		mz = rte_memzone_lookup(RXA_ADAPTER_ARRAY);
		if (mz == NULL)
			return -ENOMEM;

		event_eth_rx_adapter = mz->addr;
	}

	return 0;
}

static inline struct event_eth_rx_adapter *
rxa_id_to_adapter(uint8_t id)
{
	return event_eth_rx_adapter ?
		event_eth_rx_adapter[id] : NULL;
}
static int
rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
		struct rte_event_eth_rx_adapter_conf *conf, void *arg)
{
	int ret;
	struct rte_eventdev *dev;
	struct rte_event_dev_config dev_conf;
	int started;
	uint8_t port_id;
	struct rte_event_port_conf *port_conf = arg;
	struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);

	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);
	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;
	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to configure event dev %u\n",
						dev_id);
		if (started) {
			if (rte_event_dev_start(dev_id))
				return -EIO;
		}
		return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
					port_id);
		return ret;
	}

	conf->event_port_id = port_id;
	conf->max_nb_rx = 128;
	if (started)
		ret = rte_event_dev_start(dev_id);
	rx_adapter->default_cb_arg = 1;
	return ret;
}
static int
rxa_epoll_create1(void)
{
#if defined(LINUX)
	int fd;

	fd = epoll_create1(EPOLL_CLOEXEC);
	return fd < 0 ? -errno : fd;
#elif defined(BSD)
	return -ENOTSUP;
#endif
}

static int
rxa_init_epd(struct event_eth_rx_adapter *rx_adapter)
{
	if (rx_adapter->epd != INIT_FD)
		return 0;

	rx_adapter->epd = rxa_epoll_create1();
	if (rx_adapter->epd < 0) {
		int err = rx_adapter->epd;

		rx_adapter->epd = INIT_FD;
		RTE_EDEV_LOG_ERR("epoll_create1() failed, err %d", err);
		return err;
	}

	return 0;
}
static int
rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
{
	int err;
	char thread_name[RTE_MAX_THREAD_NAME_LEN];

	if (rx_adapter->intr_ring)
		return 0;

	rx_adapter->intr_ring = rte_ring_create("intr_ring",
					RTE_EVENT_ETH_INTR_RING_SIZE,
					rte_socket_id(), 0);
	if (!rx_adapter->intr_ring)
		return -ENOMEM;

	rx_adapter->epoll_events = rte_zmalloc_socket(rx_adapter->mem_name,
					RTE_EVENT_ETH_INTR_RING_SIZE *
					sizeof(struct rte_epoll_event),
					RTE_CACHE_LINE_SIZE,
					rx_adapter->socket_id);
	if (!rx_adapter->epoll_events) {
		err = -ENOMEM;
		goto error;
	}

	rte_spinlock_init(&rx_adapter->intr_ring_lock);

	snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
			"rx-intr-thread-%d", rx_adapter->id);

	err = rte_ctrl_thread_create(&rx_adapter->rx_intr_thread, thread_name,
				NULL, rxa_intr_thread, rx_adapter);
	if (!err)
		return 0;

	RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d\n", err);
	rte_free(rx_adapter->epoll_events);
error:
	rte_ring_free(rx_adapter->intr_ring);
	rx_adapter->intr_ring = NULL;
	rx_adapter->epoll_events = NULL;
	return err;
}
static int
rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
{
	int err;

	err = pthread_cancel(rx_adapter->rx_intr_thread);
	if (err)
		RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d\n",
				err);

	err = pthread_join(rx_adapter->rx_intr_thread, NULL);
	if (err)
		RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d\n", err);

	rte_free(rx_adapter->epoll_events);
	rte_ring_free(rx_adapter->intr_ring);
	rx_adapter->intr_ring = NULL;
	rx_adapter->epoll_events = NULL;
	return 0;
}
static int
rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter)
{
	int ret;

	if (rx_adapter->num_rx_intr == 0)
		return 0;

	ret = rxa_destroy_intr_thread(rx_adapter);
	if (ret)
		return ret;

	close(rx_adapter->epd);
	rx_adapter->epd = INIT_FD;

	return ret;
}
static int
rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter,
	struct eth_device_info *dev_info, uint16_t rx_queue_id)
{
	int err;
	uint16_t eth_dev_id = dev_info->dev->data->port_id;
	int sintr = rxa_shared_intr(dev_info, rx_queue_id);

	err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
	if (err) {
		RTE_EDEV_LOG_ERR("Could not disable interrupt for Rx queue %u",
			rx_queue_id);
		return err;
	}

	err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
					rx_adapter->epd,
					RTE_INTR_EVENT_DEL,
					0);
	if (err)
		RTE_EDEV_LOG_ERR("Interrupt event deletion failed %d", err);

	if (!sintr)
		dev_info->rx_queue[rx_queue_id].intr_enabled = 0;
	else
		dev_info->shared_intr_enabled = 0;
	return err;
}
static int
rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
		struct eth_device_info *dev_info, int rx_queue_id)
{
	int err = 0;
	int i;
	int s;

	if (dev_info->nb_rx_intr == 0)
		return 0;

	if (rx_queue_id == -1) {
		s = dev_info->nb_shared_intr;
		for (i = 0; i < dev_info->nb_rx_intr; i++) {
			int sintr;
			uint16_t q;

			q = dev_info->intr_queue[i];
			sintr = rxa_shared_intr(dev_info, q);
			s -= sintr;

			if (!sintr || s == 0) {
				err = rxa_disable_intr(rx_adapter, dev_info,
						q);
				if (err)
					return err;
				rxa_intr_ring_del_entries(rx_adapter, dev_info,
							q);
			}
		}
	} else {
		if (!rxa_intr_queue(dev_info, rx_queue_id))
			return 0;
		if (!rxa_shared_intr(dev_info, rx_queue_id) ||
				dev_info->nb_shared_intr == 1) {
			err = rxa_disable_intr(rx_adapter, dev_info,
					rx_queue_id);
			if (err)
				return err;
			rxa_intr_ring_del_entries(rx_adapter, dev_info,
						rx_queue_id);
		}

		for (i = 0; i < dev_info->nb_rx_intr; i++) {
			if (dev_info->intr_queue[i] == rx_queue_id) {
				for (; i < dev_info->nb_rx_intr - 1; i++)
					dev_info->intr_queue[i] =
						dev_info->intr_queue[i + 1];
				break;
			}
		}
	}

	return err;
}
static int
rxa_config_intr(struct event_eth_rx_adapter *rx_adapter,
	struct eth_device_info *dev_info, uint16_t rx_queue_id)
{
	int err, err1;
	uint16_t eth_dev_id = dev_info->dev->data->port_id;
	union queue_data qd;
	int init_fd;
	uint16_t *intr_queue;
	int sintr = rxa_shared_intr(dev_info, rx_queue_id);

	if (rxa_intr_queue(dev_info, rx_queue_id))
		return 0;

	intr_queue = dev_info->intr_queue;
	if (dev_info->intr_queue == NULL) {
		size_t len =
			dev_info->dev->data->nb_rx_queues * sizeof(uint16_t);
		dev_info->intr_queue =
			rte_zmalloc_socket(
				rx_adapter->mem_name,
				len,
				0,
				rx_adapter->socket_id);
		if (dev_info->intr_queue == NULL)
			return -ENOMEM;
	}

	init_fd = rx_adapter->epd;
	err = rxa_init_epd(rx_adapter);
	if (err)
		goto err_free_queue;

	qd.port = eth_dev_id;
	qd.queue = rx_queue_id;

	err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
					rx_adapter->epd,
					RTE_INTR_EVENT_ADD,
					qd.ptr);
	if (err) {
		RTE_EDEV_LOG_ERR("Failed to add interrupt event for"
			" Rx Queue %u err %d", rx_queue_id, err);
		goto err_del_fd;
	}

	err = rte_eth_dev_rx_intr_enable(eth_dev_id, rx_queue_id);
	if (err) {
		RTE_EDEV_LOG_ERR("Could not enable interrupt for"
				" Rx Queue %u err %d", rx_queue_id, err);
		goto err_del_event;
	}

	err = rxa_create_intr_thread(rx_adapter);
	if (!err) {
		if (sintr)
			dev_info->shared_intr_enabled = 1;
		else
			dev_info->rx_queue[rx_queue_id].intr_enabled = 1;
		return 0;
	}

	err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
	if (err)
		RTE_EDEV_LOG_ERR("Could not disable interrupt for"
				" Rx Queue %u err %d", rx_queue_id, err);
err_del_event:
	err1 = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
					rx_adapter->epd,
					RTE_INTR_EVENT_DEL,
					0);
	if (err1) {
		RTE_EDEV_LOG_ERR("Could not delete event for"
				" Rx Queue %u err %d", rx_queue_id, err1);
	}
err_del_fd:
	if (init_fd == INIT_FD) {
		close(rx_adapter->epd);
		rx_adapter->epd = -1;
	}
err_free_queue:
	if (intr_queue == NULL)
		rte_free(dev_info->intr_queue);

	return err;
}
static int
rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
	struct eth_device_info *dev_info, int rx_queue_id)
{
	int i, j, err;
	int si = -1;
	int shared_done = (dev_info->nb_shared_intr > 0);

	if (rx_queue_id != -1) {
		if (rxa_shared_intr(dev_info, rx_queue_id) && shared_done)
			return 0;
		return rxa_config_intr(rx_adapter, dev_info, rx_queue_id);
	}

	err = 0;
	for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++) {

		if (rxa_shared_intr(dev_info, i) && shared_done)
			continue;

		err = rxa_config_intr(rx_adapter, dev_info, i);

		shared_done = err == 0 && rxa_shared_intr(dev_info, i);
		if (shared_done) {
			si = i;
			dev_info->shared_intr_enabled = 1;
		}
		if (err)
			break;
	}

	if (err == 0)
		return 0;

	shared_done = (dev_info->nb_shared_intr > 0);
	for (j = 0; j < i; j++) {
		if (rxa_intr_queue(dev_info, j))
			continue;
		if (rxa_shared_intr(dev_info, j) && si != j)
			continue;
		err = rxa_disable_intr(rx_adapter, dev_info, j);
		if (err)
			break;
	}

	return err;
}
static int
rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
{
	int ret;
	struct rte_service_spec service;
	struct rte_event_eth_rx_adapter_conf rx_adapter_conf;

	if (rx_adapter->service_inited)
		return 0;

	memset(&service, 0, sizeof(service));
	snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN,
		"rte_event_eth_rx_adapter_%d", id);
	service.socket_id = rx_adapter->socket_id;
	service.callback = rxa_service_func;
	service.callback_userdata = rx_adapter;
	/* Service function handles locking for queue add/del updates */
	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
	ret = rte_service_component_register(&service, &rx_adapter->service_id);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
			service.name, ret);
		return ret;
	}

	ret = rx_adapter->conf_cb(id, rx_adapter->eventdev_id,
		&rx_adapter_conf, rx_adapter->conf_arg);
	if (ret) {
		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
			ret);
		goto err_done;
	}
	rx_adapter->event_port_id = rx_adapter_conf.event_port_id;
	rx_adapter->max_nb_rx = rx_adapter_conf.max_nb_rx;
	rx_adapter->service_inited = 1;
	rx_adapter->epd = INIT_FD;
	return 0;

err_done:
	rte_service_component_unregister(rx_adapter->service_id);
	return ret;
}
static void
rxa_update_queue(struct event_eth_rx_adapter *rx_adapter,
		struct eth_device_info *dev_info, int32_t rx_queue_id,
		uint8_t add)
{
	struct eth_rx_queue_info *queue_info;
	int enabled;
	uint16_t i;

	if (dev_info->rx_queue == NULL)
		return;

	if (rx_queue_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
			rxa_update_queue(rx_adapter, dev_info, i, add);
	} else {
		queue_info = &dev_info->rx_queue[rx_queue_id];
		enabled = queue_info->queue_enabled;
		if (add) {
			rx_adapter->nb_queues += !enabled;
			dev_info->nb_dev_queues += !enabled;
		} else {
			rx_adapter->nb_queues -= enabled;
			dev_info->nb_dev_queues -= enabled;
		}
		queue_info->queue_enabled = !!add;
	}
}
static void
rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
		    uint64_t vector_ns, struct rte_mempool *mp, uint32_t qid,
		    uint16_t port_id)
{
#define NSEC2TICK(__ns, __freq) (((__ns) * (__freq)) / 1E9)
	struct eth_rx_vector_data *vector_data;
	uint32_t flow_id;

	vector_data = &queue_info->vector_data;
	vector_data->max_vector_count = vector_count;
	vector_data->port = port_id;
	vector_data->queue = qid;
	vector_data->vector_pool = mp;
	vector_data->vector_timeout_ticks =
		NSEC2TICK(vector_ns, rte_get_timer_hz());
	vector_data->ts = 0;
	flow_id = queue_info->event & 0xFFFFF;
	flow_id =
		flow_id == 0 ? (qid & 0xFFF) | (port_id & 0xFF) << 12 : flow_id;
	vector_data->event = (queue_info->event & ~0xFFFFF) | flow_id;
}
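
/* Worked example for NSEC2TICK (illustrative, assumes a 2 GHz timer):
 * vector_ns = 1E5 (MIN_VECTOR_NS) and rte_get_timer_hz() = 2e9 give
 * vector_timeout_ticks = (1e5 * 2e9) / 1e9 = 200000 ticks. The service
 * function compares rte_rdtsc() deltas against this value to expire
 * partially filled vectors.
 */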
static void
rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
	struct eth_device_info *dev_info, int32_t rx_queue_id)
{
	struct eth_rx_vector_data *vec;
	int pollq;
	int intrq;
	int sintrq;

	if (rx_adapter->nb_queues == 0)
		return;

	if (rx_queue_id == -1) {
		uint16_t nb_rx_queues;
		uint16_t i;

		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
		for (i = 0; i < nb_rx_queues; i++)
			rxa_sw_del(rx_adapter, dev_info, i);
		return;
	}

	/* Push all the partial event vectors to event device. */
	TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {
		if (vec->queue != rx_queue_id)
			continue;
		rxa_vector_expire(vec, rx_adapter);
		TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
	}

	pollq = rxa_polled_queue(dev_info, rx_queue_id);
	intrq = rxa_intr_queue(dev_info, rx_queue_id);
	sintrq = rxa_shared_intr(dev_info, rx_queue_id);
	rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 0);
	rx_adapter->num_rx_polled -= pollq;
	dev_info->nb_rx_poll -= pollq;
	rx_adapter->num_rx_intr -= intrq;
	dev_info->nb_rx_intr -= intrq;
	dev_info->nb_shared_intr -= intrq && sintrq;
	if (rx_adapter->use_queue_event_buf) {
		struct eth_event_enqueue_buffer *event_buf =
			dev_info->rx_queue[rx_queue_id].event_buf;
		struct rte_event_eth_rx_adapter_stats *stats =
			dev_info->rx_queue[rx_queue_id].stats;
		rte_free(event_buf->events);
		rte_free(event_buf);
		rte_free(stats);
		dev_info->rx_queue[rx_queue_id].event_buf = NULL;
		dev_info->rx_queue[rx_queue_id].stats = NULL;
	}
}
static int
rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
	struct eth_device_info *dev_info, int32_t rx_queue_id,
	const struct rte_event_eth_rx_adapter_queue_conf *conf)
{
	struct eth_rx_queue_info *queue_info;
	const struct rte_event *ev = &conf->ev;
	int pollq;
	int intrq;
	int sintrq;
	struct rte_event *qi_ev;
	struct eth_event_enqueue_buffer *new_rx_buf = NULL;
	struct rte_event_eth_rx_adapter_stats *stats = NULL;
	uint16_t eth_dev_id = dev_info->dev->data->port_id;
	int ret;

	if (rx_queue_id == -1) {
		uint16_t nb_rx_queues;
		uint16_t i;

		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
		for (i = 0; i < nb_rx_queues; i++) {
			ret = rxa_add_queue(rx_adapter, dev_info, i, conf);
			if (ret)
				return ret;
		}
		return 0;
	}

	pollq = rxa_polled_queue(dev_info, rx_queue_id);
	intrq = rxa_intr_queue(dev_info, rx_queue_id);
	sintrq = rxa_shared_intr(dev_info, rx_queue_id);

	queue_info = &dev_info->rx_queue[rx_queue_id];
	queue_info->wt = conf->servicing_weight;

	qi_ev = (struct rte_event *)&queue_info->event;
	qi_ev->event = ev->event;
	qi_ev->op = RTE_EVENT_OP_NEW;
	qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
	qi_ev->sub_event_type = 0;

	if (conf->rx_queue_flags &
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {
		queue_info->flow_id_mask = ~0;
	} else
		qi_ev->flow_id = 0;

	if (conf->rx_queue_flags &
	    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
		queue_info->ena_vector = 1;
		qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
		rxa_set_vector_data(queue_info, conf->vector_sz,
				    conf->vector_timeout_ns, conf->vector_mp,
				    rx_queue_id, dev_info->dev->data->port_id);
		rx_adapter->ena_vector = 1;
		rx_adapter->vector_tmo_ticks =
			rx_adapter->vector_tmo_ticks ?
				RTE_MIN(queue_info->vector_data
						.vector_timeout_ticks >> 1,
					rx_adapter->vector_tmo_ticks) :
				queue_info->vector_data
						.vector_timeout_ticks >> 1;
	}

	rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
	if (rxa_polled_queue(dev_info, rx_queue_id)) {
		rx_adapter->num_rx_polled += !pollq;
		dev_info->nb_rx_poll += !pollq;
		rx_adapter->num_rx_intr -= intrq;
		dev_info->nb_rx_intr -= intrq;
		dev_info->nb_shared_intr -= intrq && sintrq;
	}

	if (rxa_intr_queue(dev_info, rx_queue_id)) {
		rx_adapter->num_rx_polled -= pollq;
		dev_info->nb_rx_poll -= pollq;
		rx_adapter->num_rx_intr += !intrq;
		dev_info->nb_rx_intr += !intrq;
		dev_info->nb_shared_intr += !intrq && sintrq;
		if (dev_info->nb_shared_intr == 1) {
			if (dev_info->multi_intr_cap)
				dev_info->next_q_idx =
					RTE_MAX_RXTX_INTR_VEC_ID - 1;
			else
				dev_info->next_q_idx = 0;
		}
	}

	if (!rx_adapter->use_queue_event_buf)
		return 0;

	new_rx_buf = rte_zmalloc_socket("rx_buffer_meta",
				sizeof(*new_rx_buf), 0,
				rte_eth_dev_socket_id(eth_dev_id));
	if (new_rx_buf == NULL) {
		RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for "
				 "dev_id: %d queue_id: %d",
				 eth_dev_id, rx_queue_id);
		return -ENOMEM;
	}

	new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE);
	new_rx_buf->events_size += (2 * BATCH_SIZE);
	new_rx_buf->events = rte_zmalloc_socket("rx_buffer",
				sizeof(struct rte_event) *
				new_rx_buf->events_size, 0,
				rte_eth_dev_socket_id(eth_dev_id));
	if (new_rx_buf->events == NULL) {
		rte_free(new_rx_buf);
		RTE_EDEV_LOG_ERR("Failed to allocate event buffer for "
				 "dev_id: %d queue_id: %d",
				 eth_dev_id, rx_queue_id);
		return -ENOMEM;
	}

	queue_info->event_buf = new_rx_buf;

	/* Allocate storage for adapter queue stats */
	stats = rte_zmalloc_socket("rx_queue_stats",
				sizeof(*stats), 0,
				rte_eth_dev_socket_id(eth_dev_id));
	if (stats == NULL) {
		rte_free(new_rx_buf->events);
		rte_free(new_rx_buf);
		RTE_EDEV_LOG_ERR("Failed to allocate stats storage for"
				 " dev_id: %d queue_id: %d",
				 eth_dev_id, rx_queue_id);
		return -ENOMEM;
	}

	queue_info->stats = stats;

	return 0;
}
static int
rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
		int rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
	struct rte_event_eth_rx_adapter_queue_conf temp_conf;
	int ret;
	struct eth_rx_poll_entry *rx_poll;
	struct eth_rx_queue_info *rx_queue;
	uint32_t *rx_wrr;
	uint16_t nb_rx_queues;
	uint32_t nb_rx_poll, nb_wrr;
	uint32_t nb_rx_intr;
	int num_intr_vec;
	uint16_t wt;

	if (queue_conf->servicing_weight == 0) {
		struct rte_eth_dev_data *data = dev_info->dev->data;

		temp_conf = *queue_conf;
		if (!data->dev_conf.intr_conf.rxq) {
			/* If Rx interrupts are disabled set wt = 1 */
			temp_conf.servicing_weight = 1;
		}
		queue_conf = &temp_conf;
	}

	if (queue_conf->servicing_weight == 0 &&
	    rx_adapter->use_queue_event_buf) {

		RTE_EDEV_LOG_ERR("Use of queue level event buffer "
				 "not supported for interrupt queues "
				 "dev_id: %d queue_id: %d",
				 eth_dev_id, rx_queue_id);
		return -EINVAL;
	}

	nb_rx_queues = dev_info->dev->data->nb_rx_queues;
	rx_queue = dev_info->rx_queue;
	wt = queue_conf->servicing_weight;

	if (dev_info->rx_queue == NULL) {
		dev_info->rx_queue =
		    rte_zmalloc_socket(rx_adapter->mem_name,
				       nb_rx_queues *
				       sizeof(struct eth_rx_queue_info), 0,
				       rx_adapter->socket_id);
		if (dev_info->rx_queue == NULL)
			return -ENOMEM;
	}
	rx_wrr = NULL;
	rx_poll = NULL;

	rxa_calc_nb_post_add(rx_adapter, dev_info, rx_queue_id,
			queue_conf->servicing_weight,
			&nb_rx_poll, &nb_rx_intr, &nb_wrr);

	if (dev_info->dev->intr_handle)
		dev_info->multi_intr_cap =
			rte_intr_cap_multiple(dev_info->dev->intr_handle);

	ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
				&rx_poll, &rx_wrr);
	if (ret)
		goto err_free_rxqueue;

	if (wt == 0) {
		num_intr_vec = rxa_nb_intr_vect(dev_info, rx_queue_id, 1);

		ret = rxa_intr_ring_check_avail(rx_adapter, num_intr_vec);
		if (ret)
			goto err_free_rxqueue;

		ret = rxa_add_intr_queue(rx_adapter, dev_info, rx_queue_id);
		if (ret)
			goto err_free_rxqueue;
	} else {

		num_intr_vec = 0;
		if (rx_adapter->num_rx_intr > nb_rx_intr) {
			num_intr_vec = rxa_nb_intr_vect(dev_info,
						rx_queue_id, 0);
			/* interrupt based queues are being converted to
			 * poll mode queues, delete the interrupt configuration
			 * for those queues
			 */
			ret = rxa_del_intr_queue(rx_adapter,
						dev_info, rx_queue_id);
			if (ret)
				goto err_free_rxqueue;
		}
	}

	if (nb_rx_intr == 0) {
		ret = rxa_free_intr_resources(rx_adapter);
		if (ret)
			goto err_free_rxqueue;
	}

	if (wt == 0) {
		uint16_t i;

		/* Affected device indices */
		if (rx_queue_id == -1) {
			for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
				dev_info->intr_queue[i] = i;
		} else {
			if (!rxa_intr_queue(dev_info, rx_queue_id))
				dev_info->intr_queue[nb_rx_intr - 1] =
					rx_queue_id;
		}
	}

	ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
	if (ret)
		goto err_free_rxqueue;
	rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);

	rte_free(rx_adapter->eth_rx_poll);
	rte_free(rx_adapter->wrr_sched);

	rx_adapter->eth_rx_poll = rx_poll;
	rx_adapter->wrr_sched = rx_wrr;
	rx_adapter->wrr_len = nb_wrr;
	rx_adapter->num_intr_vec += num_intr_vec;
	return 0;

err_free_rxqueue:
	if (rx_queue == NULL) {
		rte_free(dev_info->rx_queue);
		dev_info->rx_queue = NULL;
	}

	rte_free(rx_poll);
	rte_free(rx_wrr);

	return ret;
}
static int
rxa_ctrl(uint8_t id, int start)
{
	struct event_eth_rx_adapter *rx_adapter;
	struct rte_eventdev *dev;
	struct eth_device_info *dev_info;
	uint32_t i;
	int use_service = 0;
	int stop = !start;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	rx_adapter = rxa_id_to_adapter(id);
	if (rx_adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[rx_adapter->eventdev_id];

	RTE_ETH_FOREACH_DEV(i) {
		dev_info = &rx_adapter->eth_devices[i];
		/* if start check for num dev queues */
		if (start && !dev_info->nb_dev_queues)
			continue;
		/* if stop check if dev has been started */
		if (stop && !dev_info->dev_rx_started)
			continue;
		use_service |= !dev_info->internal_event_port;
		dev_info->dev_rx_started = start;
		if (dev_info->internal_event_port == 0)
			continue;
		start ? (*dev->dev_ops->eth_rx_adapter_start)(dev,
						&rte_eth_devices[i]) :
			(*dev->dev_ops->eth_rx_adapter_stop)(dev,
						&rte_eth_devices[i]);
	}

	if (use_service) {
		rte_spinlock_lock(&rx_adapter->rx_lock);
		rx_adapter->rxa_started = start;
		rte_service_runstate_set(rx_adapter->service_id, start);
		rte_spinlock_unlock(&rx_adapter->rx_lock);
	}

	return 0;
}
static int
rxa_create(uint8_t id, uint8_t dev_id,
	   struct rte_event_eth_rx_adapter_params *rxa_params,
	   rte_event_eth_rx_adapter_conf_cb conf_cb,
	   void *conf_arg)
{
	struct event_eth_rx_adapter *rx_adapter;
	struct eth_event_enqueue_buffer *buf;
	struct rte_event *events;
	int ret;
	int socket_id;
	uint16_t i;
	char mem_name[ETH_RX_ADAPTER_SERVICE_NAME_LEN];
	const uint8_t default_rss_key[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
	};

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	if (conf_cb == NULL)
		return -EINVAL;

	if (event_eth_rx_adapter == NULL) {
		ret = rte_event_eth_rx_adapter_init();
		if (ret)
			return ret;
	}

	rx_adapter = rxa_id_to_adapter(id);
	if (rx_adapter != NULL) {
		RTE_EDEV_LOG_ERR("Eth Rx adapter exists id = %" PRIu8, id);
		return -EEXIST;
	}

	socket_id = rte_event_dev_socket_id(dev_id);
	snprintf(mem_name, ETH_RX_ADAPTER_MEM_NAME_LEN,
		"rte_event_eth_rx_adapter_%d",
		id);

	rx_adapter = rte_zmalloc_socket(mem_name, sizeof(*rx_adapter),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (rx_adapter == NULL) {
		RTE_EDEV_LOG_ERR("failed to get mem for rx adapter");
		return -ENOMEM;
	}

	rx_adapter->eventdev_id = dev_id;
	rx_adapter->socket_id = socket_id;
	rx_adapter->conf_cb = conf_cb;
	rx_adapter->conf_arg = conf_arg;
	rx_adapter->id = id;
	TAILQ_INIT(&rx_adapter->vector_list);
	strcpy(rx_adapter->mem_name, mem_name);
	rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
					RTE_MAX_ETHPORTS *
					sizeof(struct eth_device_info), 0,
					socket_id);
	rte_convert_rss_key((const uint32_t *)default_rss_key,
			(uint32_t *)rx_adapter->rss_key_be,
			    RTE_DIM(default_rss_key));

	if (rx_adapter->eth_devices == NULL) {
		RTE_EDEV_LOG_ERR("failed to get mem for eth devices\n");
		rte_free(rx_adapter);
		return -ENOMEM;
	}

	rte_spinlock_init(&rx_adapter->rx_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];

	/* Rx adapter event buffer allocation */
	rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf;

	if (!rx_adapter->use_queue_event_buf) {
		buf = &rx_adapter->event_enqueue_buffer;
		buf->events_size = rxa_params->event_buf_size;

		events = rte_zmalloc_socket(rx_adapter->mem_name,
					    buf->events_size * sizeof(*events),
					    0, socket_id);
		if (events == NULL) {
			RTE_EDEV_LOG_ERR("Failed to allocate memory "
					 "for adapter event buffer");
			rte_free(rx_adapter->eth_devices);
			rte_free(rx_adapter);
			return -ENOMEM;
		}

		rx_adapter->event_enqueue_buffer.events = events;
	}

	event_eth_rx_adapter[id] = rx_adapter;

	if (conf_cb == rxa_default_conf_cb)
		rx_adapter->default_cb_arg = 1;

	if (rte_mbuf_dyn_rx_timestamp_register(
			&event_eth_rx_timestamp_dynfield_offset,
			&event_eth_rx_timestamp_dynflag) != 0) {
		RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf\n");
		return -rte_errno;
	}

	rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb,
		conf_arg);
	return 0;
}
int
rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
				rte_event_eth_rx_adapter_conf_cb conf_cb,
				void *conf_arg)
{
	struct rte_event_eth_rx_adapter_params rxa_params = {0};

	/* use default values for adapter params */
	rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE;
	rxa_params.use_queue_event_buf = false;

	return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg);
}
int
rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id,
			struct rte_event_port_conf *port_config,
			struct rte_event_eth_rx_adapter_params *rxa_params)
{
	struct rte_event_port_conf *pc;
	int ret;
	struct rte_event_eth_rx_adapter_params temp_params = {0};

	if (port_config == NULL)
		return -EINVAL;

	if (rxa_params == NULL) {
		/* use default values if rxa_params is NULL */
		rxa_params = &temp_params;
		rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE;
		rxa_params->use_queue_event_buf = false;
	} else if ((!rxa_params->use_queue_event_buf &&
		    rxa_params->event_buf_size == 0) ||
		   (rxa_params->use_queue_event_buf &&
		    rxa_params->event_buf_size != 0)) {
		RTE_EDEV_LOG_ERR("Invalid adapter params\n");
		return -EINVAL;
	} else if (!rxa_params->use_queue_event_buf) {
		/* adjust event buffer size with BATCH_SIZE used for fetching
		 * packets from NIC rx queues to get full buffer utilization
		 * and prevent unnecessary rollovers.
		 */

		rxa_params->event_buf_size =
			RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE);
		rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE);
	}

	pc = rte_malloc(NULL, sizeof(*pc), 0);
	if (pc == NULL)
		return -ENOMEM;

	*pc = *port_config;

	ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc);
	if (ret)
		rte_free(pc);

	return ret;
}
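
/* Example of the buffer size adjustment above (illustrative): a requested
 * event_buf_size of 1000 is first rounded up to a multiple of
 * BATCH_SIZE(32), giving 1024, and then grown by 2 * BATCH_SIZE to 1088,
 * so that a full rte_eth_rx_burst() batch always fits without forcing an
 * early rollover of the circular enqueue buffer.
 */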
int
rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
		struct rte_event_port_conf *port_config)
{
	struct rte_event_port_conf *pc;
	int ret;

	if (port_config == NULL)
		return -EINVAL;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	pc = rte_malloc(NULL, sizeof(*pc), 0);
	if (pc == NULL)
		return -ENOMEM;
	*pc = *port_config;

	ret = rte_event_eth_rx_adapter_create_ext(id, dev_id,
					rxa_default_conf_cb,
					pc);
	if (ret)
		rte_free(pc);
	return ret;
}
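
/* Minimal creation sketch (hedged, not part of the original source): an
 * application typically creates the adapter with the event port
 * configuration it used for the event device and then attaches Rx queues;
 * adapter id 0, event device 0 and ethdev port 0 are assumed here purely
 * for illustration:
 *
 *	struct rte_event_port_conf port_conf = {
 *		.new_event_threshold = 1024,
 *		.dequeue_depth = 32,
 *		.enqueue_depth = 32,
 *	};
 *	struct rte_event_eth_rx_adapter_queue_conf qconf = {
 *		.ev.queue_id = 0,
 *		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.servicing_weight = 1,
 *	};
 *
 *	rte_event_eth_rx_adapter_create(0, 0, &port_conf);
 *	rte_event_eth_rx_adapter_queue_add(0, 0, -1, &qconf);
 *	rte_event_eth_rx_adapter_start(0);
 */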
int
rte_event_eth_rx_adapter_free(uint8_t id)
{
	struct event_eth_rx_adapter *rx_adapter;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	rx_adapter = rxa_id_to_adapter(id);
	if (rx_adapter == NULL)
		return -EINVAL;

	if (rx_adapter->nb_queues) {
		RTE_EDEV_LOG_ERR("%" PRIu16 " Rx queues not deleted",
				rx_adapter->nb_queues);
		return -EBUSY;
	}

	if (rx_adapter->default_cb_arg)
		rte_free(rx_adapter->conf_arg);
	rte_free(rx_adapter->eth_devices);
	if (!rx_adapter->use_queue_event_buf)
		rte_free(rx_adapter->event_enqueue_buffer.events);
	rte_free(rx_adapter);
	event_eth_rx_adapter[id] = NULL;

	rte_eventdev_trace_eth_rx_adapter_free(id);
	return 0;
}

int
rte_event_eth_rx_adapter_queue_add(uint8_t id,
		uint16_t eth_dev_id,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	int ret;
	uint32_t cap;
	struct event_eth_rx_adapter *rx_adapter;
	struct rte_eventdev *dev;
	struct eth_device_info *dev_info;
	struct rte_event_eth_rx_adapter_vector_limits limits;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

	rx_adapter = rxa_id_to_adapter(id);
	if ((rx_adapter == NULL) || (queue_conf == NULL))
		return -EINVAL;

	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
						eth_dev_id,
						&cap);
	if (ret) {
		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
			" eth port %" PRIu16, id, eth_dev_id);
		return ret;
	}

	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) == 0
		&& (queue_conf->rx_queue_flags &
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
		RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
				" eth port: %" PRIu16 " adapter id: %" PRIu8,
				eth_dev_id, id);
		return -EINVAL;
	}

	if (queue_conf->rx_queue_flags &
	    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
		if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0) {
			RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
					 " eth port: %" PRIu16
					 " adapter id: %" PRIu8,
					 eth_dev_id, id);
			return -EINVAL;
		}

		ret = rte_event_eth_rx_adapter_vector_limits_get(
			rx_adapter->eventdev_id, eth_dev_id, &limits);
		if (ret < 0) {
			RTE_EDEV_LOG_ERR("Failed to get event device vector limits,"
					 " eth port: %" PRIu16
					 " adapter id: %" PRIu8,
					 eth_dev_id, id);
			return -EINVAL;
		}

		if (queue_conf->vector_sz < limits.min_sz ||
		    queue_conf->vector_sz > limits.max_sz ||
		    queue_conf->vector_timeout_ns < limits.min_timeout_ns ||
		    queue_conf->vector_timeout_ns > limits.max_timeout_ns ||
		    queue_conf->vector_mp == NULL) {
			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
					 " eth port: %" PRIu16
					 " adapter id: %" PRIu8,
					 eth_dev_id, id);
			return -EINVAL;
		}

		if (queue_conf->vector_mp->elt_size <
		    (sizeof(struct rte_event_vector) +
		     (sizeof(uintptr_t) * queue_conf->vector_sz))) {
			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
					 " eth port: %" PRIu16
					 " adapter id: %" PRIu8,
					 eth_dev_id, id);
			return -EINVAL;
		}
	}

	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
		(rx_queue_id != -1)) {
		RTE_EDEV_LOG_ERR("Rx queues can only be connected to single "
			"event queue, eth port: %" PRIu16 " adapter id: %"
			PRIu8, eth_dev_id, id);
		return -EINVAL;
	}

	if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
			rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
			 (uint16_t)rx_queue_id);
		return -EINVAL;
	}

	if ((rx_adapter->use_queue_event_buf &&
	     queue_conf->event_buf_size == 0) ||
	    (!rx_adapter->use_queue_event_buf &&
	     queue_conf->event_buf_size != 0)) {
		RTE_EDEV_LOG_ERR("Invalid event buffer size for the queue");
		return -EINVAL;
	}

	dev_info = &rx_adapter->eth_devices[eth_dev_id];

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_add,
					-ENOTSUP);
		if (dev_info->rx_queue == NULL) {
			dev_info->rx_queue =
			    rte_zmalloc_socket(rx_adapter->mem_name,
					dev_info->dev->data->nb_rx_queues *
					sizeof(struct eth_rx_queue_info), 0,
					rx_adapter->socket_id);
			if (dev_info->rx_queue == NULL)
				return -ENOMEM;
		}

		ret = (*dev->dev_ops->eth_rx_adapter_queue_add)(dev,
				&rte_eth_devices[eth_dev_id],
				rx_queue_id, queue_conf);
		if (ret == 0) {
			dev_info->internal_event_port = 1;
			rxa_update_queue(rx_adapter,
					&rx_adapter->eth_devices[eth_dev_id],
					rx_queue_id, 1);
		}
	} else {
		rte_spinlock_lock(&rx_adapter->rx_lock);
		dev_info->internal_event_port = 0;
		ret = rxa_init_service(rx_adapter, id);
		if (ret == 0) {
			uint32_t service_id = rx_adapter->service_id;

			ret = rxa_sw_add(rx_adapter, eth_dev_id, rx_queue_id,
					queue_conf);
			rte_service_component_runstate_set(service_id,
				rxa_sw_adapter_queue_count(rx_adapter));
		}
		rte_spinlock_unlock(&rx_adapter->rx_lock);
	}

	rte_eventdev_trace_eth_rx_adapter_queue_add(id, eth_dev_id,
		rx_queue_id, queue_conf, ret);

	return ret;
}
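
/*
 * Sketch of a queue configuration an application might pass to
 * rte_event_eth_rx_adapter_queue_add(); the destination queue, scheduling
 * type and weight shown are assumptions made for the example:
 *
 *	struct rte_event_eth_rx_adapter_queue_conf qconf = {
 *		.rx_queue_flags = 0,
 *		.servicing_weight = 1,	// relative poll weight of the queue
 *		.ev = {
 *			.queue_id = 0,	// destination event queue
 *			.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		},
 *	};
 *
 *	// rx_queue_id of -1 adds all Rx queues of the ethdev with this conf
 *	rte_event_eth_rx_adapter_queue_add(id, eth_dev_id, -1, &qconf);
 */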

static int
rxa_sw_vector_limits(struct rte_event_eth_rx_adapter_vector_limits *limits)
{
	limits->max_sz = MAX_VECTOR_SIZE;
	limits->min_sz = MIN_VECTOR_SIZE;
	limits->max_timeout_ns = MAX_VECTOR_NS;
	limits->min_timeout_ns = MIN_VECTOR_NS;

	return 0;
}

int
rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
				int32_t rx_queue_id)
{
	int ret = 0;
	struct rte_eventdev *dev;
	struct event_eth_rx_adapter *rx_adapter;
	struct eth_device_info *dev_info;
	uint32_t cap;
	uint32_t nb_rx_poll = 0;
	uint32_t nb_wrr = 0;
	uint32_t nb_rx_intr;
	struct eth_rx_poll_entry *rx_poll = NULL;
	uint32_t *rx_wrr = NULL;
	int num_intr_vec;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

	rx_adapter = rxa_id_to_adapter(id);
	if (rx_adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
						eth_dev_id,
						&cap);
	if (ret)
		return ret;

	if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
		rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
			 (uint16_t)rx_queue_id);
		return -EINVAL;
	}

	dev_info = &rx_adapter->eth_devices[eth_dev_id];

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_del,
					-ENOTSUP);
		ret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev,
						&rte_eth_devices[eth_dev_id],
						rx_queue_id);
		if (ret == 0) {
			rxa_update_queue(rx_adapter,
					&rx_adapter->eth_devices[eth_dev_id],
					rx_queue_id, 0);
			if (dev_info->nb_dev_queues == 0) {
				rte_free(dev_info->rx_queue);
				dev_info->rx_queue = NULL;
			}
		}
	} else {
		rxa_calc_nb_post_del(rx_adapter, dev_info, rx_queue_id,
			&nb_rx_poll, &nb_rx_intr, &nb_wrr);

		ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
			&rx_poll, &rx_wrr);
		if (ret)
			return ret;

		rte_spinlock_lock(&rx_adapter->rx_lock);

		num_intr_vec = 0;
		if (rx_adapter->num_rx_intr > nb_rx_intr) {
			num_intr_vec = rxa_nb_intr_vect(dev_info,
					rx_queue_id, 0);
			ret = rxa_del_intr_queue(rx_adapter, dev_info,
					rx_queue_id);
			if (ret)
				goto unlock_ret;
		}

		if (nb_rx_intr == 0) {
			ret = rxa_free_intr_resources(rx_adapter);
			if (ret)
				goto unlock_ret;
		}

		rxa_sw_del(rx_adapter, dev_info, rx_queue_id);
		rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);

		rte_free(rx_adapter->eth_rx_poll);
		rte_free(rx_adapter->wrr_sched);

		if (nb_rx_intr == 0) {
			rte_free(dev_info->intr_queue);
			dev_info->intr_queue = NULL;
		}

		rx_adapter->eth_rx_poll = rx_poll;
		rx_adapter->wrr_sched = rx_wrr;
		rx_adapter->wrr_len = nb_wrr;
		/*
		 * reset next poll start position (wrr_pos) to avoid buffer
		 * overrun when wrr_len is reduced in case of queue delete
		 */
		rx_adapter->wrr_pos = 0;
		rx_adapter->num_intr_vec += num_intr_vec;

		if (dev_info->nb_dev_queues == 0) {
			rte_free(dev_info->rx_queue);
			dev_info->rx_queue = NULL;
		}
unlock_ret:
		rte_spinlock_unlock(&rx_adapter->rx_lock);
		if (ret) {
			rte_free(rx_poll);
			rte_free(rx_wrr);
			return ret;
		}

		rte_service_component_runstate_set(rx_adapter->service_id,
				rxa_sw_adapter_queue_count(rx_adapter));
	}

	rte_eventdev_trace_eth_rx_adapter_queue_del(id, eth_dev_id,
		rx_queue_id, ret);

	return ret;
}

int
rte_event_eth_rx_adapter_vector_limits_get(
	uint8_t dev_id, uint16_t eth_port_id,
	struct rte_event_eth_rx_adapter_vector_limits *limits)
{
	struct rte_eventdev *dev;
	uint32_t cap;
	int ret;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	if (limits == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[dev_id];

	ret = rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &cap);
	if (ret) {
		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
				 " eth port %" PRIu16,
				 dev_id, eth_port_id);
		return ret;
	}

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
		RTE_FUNC_PTR_OR_ERR_RET(
			*dev->dev_ops->eth_rx_adapter_vector_limits_get,
			-ENOTSUP);
		ret = dev->dev_ops->eth_rx_adapter_vector_limits_get(
			dev, &rte_eth_devices[eth_port_id], limits);
	} else {
		ret = rxa_sw_vector_limits(limits);
	}

	return ret;
}
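
/*
 * Sketch: an application clamping its preferred vector size and timeout
 * to the reported limits before requesting
 * RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR; the preferred values
 * (64 events, 100 us) and the qconf variable are assumptions made for
 * the example:
 *
 *	struct rte_event_eth_rx_adapter_vector_limits lim;
 *
 *	rte_event_eth_rx_adapter_vector_limits_get(dev_id, eth_port_id, &lim);
 *	qconf.vector_sz = RTE_MAX(lim.min_sz, RTE_MIN(lim.max_sz, 64));
 *	qconf.vector_timeout_ns = RTE_MAX(lim.min_timeout_ns,
 *			RTE_MIN(lim.max_timeout_ns, 100000));
 */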

int
rte_event_eth_rx_adapter_start(uint8_t id)
{
	rte_eventdev_trace_eth_rx_adapter_start(id);
	return rxa_ctrl(id, 1);
}

int
rte_event_eth_rx_adapter_stop(uint8_t id)
{
	rte_eventdev_trace_eth_rx_adapter_stop(id);
	return rxa_ctrl(id, 0);
}

static void
rxa_queue_stats_reset(struct eth_rx_queue_info *queue_info)
{
	struct rte_event_eth_rx_adapter_stats *q_stats;

	q_stats = queue_info->stats;
	memset(q_stats, 0, sizeof(*q_stats));
}

int
rte_event_eth_rx_adapter_stats_get(uint8_t id,
			       struct rte_event_eth_rx_adapter_stats *stats)
{
	struct event_eth_rx_adapter *rx_adapter;
	struct eth_event_enqueue_buffer *buf;
	struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
	struct rte_event_eth_rx_adapter_stats dev_stats;
	struct rte_eventdev *dev;
	struct eth_device_info *dev_info;
	struct eth_rx_queue_info *queue_info;
	struct rte_event_eth_rx_adapter_stats *q_stats;
	uint32_t i, j;
	int ret;

	if (rxa_memzone_lookup())
		return -ENOMEM;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	rx_adapter = rxa_id_to_adapter(id);
	if (rx_adapter == NULL || stats == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	memset(stats, 0, sizeof(*stats));

	if (rx_adapter->service_inited)
		*stats = rx_adapter->stats;

	RTE_ETH_FOREACH_DEV(i) {
		dev_info = &rx_adapter->eth_devices[i];

		if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) {
			for (j = 0; j < dev_info->dev->data->nb_rx_queues;
			     j++) {
				queue_info = &dev_info->rx_queue[j];
				if (!queue_info->queue_enabled)
					continue;
				q_stats = queue_info->stats;

				stats->rx_packets += q_stats->rx_packets;
				stats->rx_poll_count += q_stats->rx_poll_count;
				stats->rx_enq_count += q_stats->rx_enq_count;
				stats->rx_enq_retry += q_stats->rx_enq_retry;
				stats->rx_dropped += q_stats->rx_dropped;
				stats->rx_enq_block_cycles +=
						q_stats->rx_enq_block_cycles;
			}
		}

		if (dev_info->internal_event_port == 0 ||
			dev->dev_ops->eth_rx_adapter_stats_get == NULL)
			continue;
		ret = (*dev->dev_ops->eth_rx_adapter_stats_get)(dev,
						&rte_eth_devices[i],
						&dev_stats);
		if (ret)
			continue;
		dev_stats_sum.rx_packets += dev_stats.rx_packets;
		dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
	}

	buf = &rx_adapter->event_enqueue_buffer;
	stats->rx_packets += dev_stats_sum.rx_packets;
	stats->rx_enq_count += dev_stats_sum.rx_enq_count;
	stats->rx_event_buf_count = buf->count;
	stats->rx_event_buf_size = buf->events_size;

	return 0;
}
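
/*
 * Sketch: periodic stats polling from an application control thread;
 * comparing rx_enq_retry and rx_dropped gives a rough view of event
 * device back-pressure (stdio.h and inttypes.h assumed for the example):
 *
 *	struct rte_event_eth_rx_adapter_stats s;
 *
 *	if (rte_event_eth_rx_adapter_stats_get(id, &s) == 0)
 *		printf("pkts %" PRIu64 " retries %" PRIu64
 *		       " dropped %" PRIu64 "\n",
 *		       s.rx_packets, s.rx_enq_retry, s.rx_dropped);
 */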

int
rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
		uint16_t eth_dev_id,
		uint16_t rx_queue_id,
		struct rte_event_eth_rx_adapter_queue_stats *stats)
{
	struct event_eth_rx_adapter *rx_adapter;
	struct eth_device_info *dev_info;
	struct eth_rx_queue_info *queue_info;
	struct eth_event_enqueue_buffer *event_buf;
	struct rte_event_eth_rx_adapter_stats *q_stats;
	struct rte_eventdev *dev;

	if (rxa_memzone_lookup())
		return -ENOMEM;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

	rx_adapter = rxa_id_to_adapter(id);
	if (rx_adapter == NULL || stats == NULL)
		return -EINVAL;

	if (!rx_adapter->use_queue_event_buf)
		return -EINVAL;

	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
		return -EINVAL;
	}

	dev_info = &rx_adapter->eth_devices[eth_dev_id];
	if (dev_info->rx_queue == NULL ||
	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
		return -EINVAL;
	}

	queue_info = &dev_info->rx_queue[rx_queue_id];
	event_buf = queue_info->event_buf;
	q_stats = queue_info->stats;

	stats->rx_event_buf_count = event_buf->count;
	stats->rx_event_buf_size = event_buf->events_size;
	stats->rx_packets = q_stats->rx_packets;
	stats->rx_poll_count = q_stats->rx_poll_count;
	stats->rx_dropped = q_stats->rx_dropped;

	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	if (dev->dev_ops->eth_rx_adapter_queue_stats_get != NULL) {
		return (*dev->dev_ops->eth_rx_adapter_queue_stats_get)(dev,
						&rte_eth_devices[eth_dev_id],
						rx_queue_id, stats);
	}

	return 0;
}

int
rte_event_eth_rx_adapter_stats_reset(uint8_t id)
{
	struct event_eth_rx_adapter *rx_adapter;
	struct rte_eventdev *dev;
	struct eth_device_info *dev_info;
	struct eth_rx_queue_info *queue_info;
	uint32_t i, j;

	if (rxa_memzone_lookup())
		return -ENOMEM;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	rx_adapter = rxa_id_to_adapter(id);
	if (rx_adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[rx_adapter->eventdev_id];

	RTE_ETH_FOREACH_DEV(i) {
		dev_info = &rx_adapter->eth_devices[i];

		if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) {
			for (j = 0; j < dev_info->dev->data->nb_rx_queues;
			     j++) {
				queue_info = &dev_info->rx_queue[j];
				if (!queue_info->queue_enabled)
					continue;
				rxa_queue_stats_reset(queue_info);
			}
		}

		if (dev_info->internal_event_port == 0 ||
			dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
			continue;
		(*dev->dev_ops->eth_rx_adapter_stats_reset)(dev,
							&rte_eth_devices[i]);
	}

	memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));

	return 0;
}

int
rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
		uint16_t eth_dev_id,
		uint16_t rx_queue_id)
{
	struct event_eth_rx_adapter *rx_adapter;
	struct eth_device_info *dev_info;
	struct eth_rx_queue_info *queue_info;
	struct rte_eventdev *dev;

	if (rxa_memzone_lookup())
		return -ENOMEM;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

	rx_adapter = rxa_id_to_adapter(id);
	if (rx_adapter == NULL)
		return -EINVAL;

	if (!rx_adapter->use_queue_event_buf)
		return -EINVAL;

	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
		return -EINVAL;
	}

	dev_info = &rx_adapter->eth_devices[eth_dev_id];

	if (dev_info->rx_queue == NULL ||
	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
		return -EINVAL;
	}

	queue_info = &dev_info->rx_queue[rx_queue_id];
	rxa_queue_stats_reset(queue_info);

	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	if (dev->dev_ops->eth_rx_adapter_queue_stats_reset != NULL) {
		return (*dev->dev_ops->eth_rx_adapter_queue_stats_reset)(dev,
						&rte_eth_devices[eth_dev_id],
						rx_queue_id);
	}

	return 0;
}

int
rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
{
	struct event_eth_rx_adapter *rx_adapter;

	if (rxa_memzone_lookup())
		return -ENOMEM;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	rx_adapter = rxa_id_to_adapter(id);
	if (rx_adapter == NULL || service_id == NULL)
		return -EINVAL;

	if (rx_adapter->service_inited)
		*service_id = rx_adapter->service_id;

	return rx_adapter->service_inited ? 0 : -ESRCH;
}
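
/*
 * Sketch: when the adapter runs as a service (no INTERNAL_PORT
 * capability), the application must map the service to a service lcore
 * and enable it before the adapter can poll; lcore 1 below is an
 * assumption made for the example:
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_eth_rx_adapter_service_id_get(id, &service_id) == 0) {
 *		rte_service_map_lcore_set(service_id, 1, 1);
 *		rte_service_runstate_set(service_id, 1);
 *	}
 */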

int
rte_event_eth_rx_adapter_cb_register(uint8_t id,
				uint16_t eth_dev_id,
				rte_event_eth_rx_adapter_cb_fn cb_fn,
				void *cb_arg)
{
	struct event_eth_rx_adapter *rx_adapter;
	struct eth_device_info *dev_info;
	uint32_t cap;
	int ret;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

	rx_adapter = rxa_id_to_adapter(id);
	if (rx_adapter == NULL)
		return -EINVAL;

	dev_info = &rx_adapter->eth_devices[eth_dev_id];
	if (dev_info->rx_queue == NULL)
		return -EINVAL;

	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
						eth_dev_id, &cap);
	if (ret) {
		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
			" eth port %" PRIu16, id, eth_dev_id);
		return ret;
	}

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
		RTE_EDEV_LOG_ERR("Rx callback not supported for eth port %"
				PRIu16, eth_dev_id);
		return -EINVAL;
	}

	rte_spinlock_lock(&rx_adapter->rx_lock);
	dev_info->cb_fn = cb_fn;
	dev_info->cb_arg = cb_arg;
	rte_spinlock_unlock(&rx_adapter->rx_lock);

	return 0;
}

int
rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
			uint16_t eth_dev_id,
			uint16_t rx_queue_id,
			struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct event_eth_rx_adapter *rx_adapter;
	struct eth_device_info *dev_info;
	struct eth_rx_queue_info *queue_info;
	struct rte_event *qi_ev;
	int ret;

	if (rxa_memzone_lookup())
		return -ENOMEM;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
		return -EINVAL;
	}

	if (queue_conf == NULL) {
		RTE_EDEV_LOG_ERR("Rx queue conf struct cannot be NULL");
		return -EINVAL;
	}

	rx_adapter = rxa_id_to_adapter(id);
	if (rx_adapter == NULL)
		return -EINVAL;

	dev_info = &rx_adapter->eth_devices[eth_dev_id];
	if (dev_info->rx_queue == NULL ||
	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
		return -EINVAL;
	}

	queue_info = &dev_info->rx_queue[rx_queue_id];
	qi_ev = (struct rte_event *)&queue_info->event;

	memset(queue_conf, 0, sizeof(*queue_conf));
	queue_conf->rx_queue_flags = 0;
	if (queue_info->flow_id_mask != 0)
		queue_conf->rx_queue_flags |=
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
	queue_conf->servicing_weight = queue_info->wt;

	memcpy(&queue_conf->ev, qi_ev, sizeof(*qi_ev));

	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	if (dev->dev_ops->eth_rx_adapter_queue_conf_get != NULL) {
		ret = (*dev->dev_ops->eth_rx_adapter_queue_conf_get)(dev,
						&rte_eth_devices[eth_dev_id],
						rx_queue_id,
						queue_conf);
		if (ret)
			return ret;
	}

	return 0;
}

#define RXA_ADD_DICT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)

static int
handle_rxa_stats(const char *cmd __rte_unused,
		 const char *params,
		 struct rte_tel_data *d)
{
	uint8_t rx_adapter_id;
	struct rte_event_eth_rx_adapter_stats rx_adptr_stats;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get Rx adapter ID from parameter string */
	rx_adapter_id = atoi(params);
	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);

	/* Get Rx adapter stats */
	if (rte_event_eth_rx_adapter_stats_get(rx_adapter_id,
					       &rx_adptr_stats)) {
		RTE_EDEV_LOG_ERR("Failed to get Rx adapter stats");
		return -1;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
	RXA_ADD_DICT(rx_adptr_stats, rx_packets);
	RXA_ADD_DICT(rx_adptr_stats, rx_poll_count);
	RXA_ADD_DICT(rx_adptr_stats, rx_dropped);
	RXA_ADD_DICT(rx_adptr_stats, rx_enq_retry);
	RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_count);
	RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_size);
	RXA_ADD_DICT(rx_adptr_stats, rx_enq_count);
	RXA_ADD_DICT(rx_adptr_stats, rx_enq_start_ts);
	RXA_ADD_DICT(rx_adptr_stats, rx_enq_block_cycles);
	RXA_ADD_DICT(rx_adptr_stats, rx_enq_end_ts);
	RXA_ADD_DICT(rx_adptr_stats, rx_intr_packets);

	return 0;
}

static int
handle_rxa_stats_reset(const char *cmd __rte_unused,
		       const char *params,
		       struct rte_tel_data *d __rte_unused)
{
	uint8_t rx_adapter_id;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get Rx adapter ID from parameter string */
	rx_adapter_id = atoi(params);
	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);

	/* Reset Rx adapter stats */
	if (rte_event_eth_rx_adapter_stats_reset(rx_adapter_id)) {
		RTE_EDEV_LOG_ERR("Failed to reset Rx adapter stats");
		return -1;
	}

	return 0;
}

static int
handle_rxa_get_queue_conf(const char *cmd __rte_unused,
			  const char *params,
			  struct rte_tel_data *d)
{
	uint8_t rx_adapter_id;
	uint16_t rx_queue_id;
	int eth_dev_id;
	char *token, *l_params;
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* strtok() modifies the string, so parse a copy of the params */
	l_params = strdup(params);
	if (l_params == NULL)
		return -ENOMEM;

	/* Get Rx adapter ID from parameter string */
	token = strtok(l_params, ",");
	rx_adapter_id = strtoul(token, NULL, 10);
	if (!rxa_validate_id(rx_adapter_id)) {
		RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d",
				 rx_adapter_id);
		goto error;
	}

	token = strtok(NULL, ",");
	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
		goto error;

	/* Get device ID from parameter string */
	eth_dev_id = strtoul(token, NULL, 10);
	if (!rte_eth_dev_is_valid_port(eth_dev_id)) {
		RTE_EDEV_LOG_ERR("Invalid eth port id = %d", eth_dev_id);
		goto error;
	}

	token = strtok(NULL, ",");
	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
		goto error;

	/* Get Rx queue ID from parameter string */
	rx_queue_id = strtoul(token, NULL, 10);
	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
		goto error;
	}

	token = strtok(NULL, "\0");
	if (token != NULL)
		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
				 " telemetry command, ignoring");

	/* token parsing is done, the copy is no longer needed */
	free(l_params);

	if (rte_event_eth_rx_adapter_queue_conf_get(rx_adapter_id, eth_dev_id,
						    rx_queue_id, &queue_conf)) {
		RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue config");
		return -1;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
	rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id);
	rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id);
	RXA_ADD_DICT(queue_conf, rx_queue_flags);
	RXA_ADD_DICT(queue_conf, servicing_weight);
	RXA_ADD_DICT(queue_conf.ev, queue_id);
	RXA_ADD_DICT(queue_conf.ev, sched_type);
	RXA_ADD_DICT(queue_conf.ev, priority);
	RXA_ADD_DICT(queue_conf.ev, flow_id);

	return 0;

error:
	free(l_params);
	return -1;
}

RTE_INIT(rxa_init_telemetry)
{
	rte_telemetry_register_cmd("/eventdev/rxa_stats",
		handle_rxa_stats,
		"Returns Rx adapter stats. Parameter: rxa_id");

	rte_telemetry_register_cmd("/eventdev/rxa_stats_reset",
		handle_rxa_stats_reset,
		"Reset Rx adapter stats. Parameter: rxa_id");

	rte_telemetry_register_cmd("/eventdev/rxa_queue_conf",
		handle_rxa_get_queue_conf,
		"Returns Rx queue config. Parameter: rxa_id, dev_id, queue_id");
}