/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation.
 */
#include <string.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/queue.h>
#include <sys/epoll.h>

#include <rte_cycles.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_service_component.h>
#include <rte_thash.h>
#include <rte_interrupts.h>
#include <rte_mbuf_dyn.h>
#include <rte_telemetry.h>

#include "rte_eventdev.h"
#include "eventdev_pmd.h"
#include "eventdev_trace.h"
#include "rte_event_eth_rx_adapter.h"
#define BATCH_SIZE 32
#define BLOCK_CNT_THRESHOLD 10
#define ETH_EVENT_BUFFER_SIZE (6*BATCH_SIZE)
#define MAX_VECTOR_SIZE 1024
#define MIN_VECTOR_SIZE 4
#define MAX_VECTOR_NS 1E9
#define MIN_VECTOR_NS 1E5

#define ETH_RX_ADAPTER_SERVICE_NAME_LEN 32
#define ETH_RX_ADAPTER_MEM_NAME_LEN 32

#define RSS_KEY_SIZE 40
/* value written to intr thread pipe to signal thread exit */
#define ETH_BRIDGE_INTR_THREAD_EXIT 1
/* Sentinel value to detect initialized file handle */
#define INIT_FD -1

#define RXA_ADAPTER_ARRAY "rte_event_eth_rx_adapter_array"
/*
 * Used to store port and queue ID of interrupting Rx queue
 */
union queue_data {
	RTE_STD_C11
	void *ptr;
	struct {
		uint16_t port;
		uint16_t queue;
	};
};

/*
 * There is an instance of this struct per polled Rx queue added to the
 * adapter
 */
struct eth_rx_poll_entry {
	/* Eth port to poll */
	uint16_t eth_dev_id;
	/* Eth rx queue to poll */
	uint16_t eth_rx_qid;
};

struct eth_rx_vector_data {
	TAILQ_ENTRY(eth_rx_vector_data) next;
	uint16_t port;
	uint16_t queue;
	uint16_t max_vector_count;
	uint64_t event;
	uint64_t ts;
	uint64_t vector_timeout_ticks;
	struct rte_mempool *vector_pool;
	struct rte_event_vector *vector_ev;
} __rte_cache_aligned;

TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
/* Instance per adapter */
struct eth_event_enqueue_buffer {
	/* Count of events in this buffer */
	uint16_t count;
	/* Array of events in this buffer */
	struct rte_event *events;
	/* size of event buffer */
	uint16_t events_size;
	/* Event enqueue happens from head */
	uint16_t head;
	/* New packets from rte_eth_rx_burst are enqueued from the tail */
	uint16_t tail;
	/* last element in the buffer before rollover */
	uint16_t last;
	uint16_t last_mask;
};
struct event_eth_rx_adapter {
	/* RSS key in the byte order used by rte_softrss_be() */
	uint8_t rss_key_be[RSS_KEY_SIZE];
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Event port identifier */
	uint8_t event_port_id;
	/* Flag indicating per rxq event buffer */
	bool use_queue_event_buf;
	/* Per ethernet device structure */
	struct eth_device_info *eth_devices;
	/* Lock to serialize config updates with service function */
	rte_spinlock_t rx_lock;
	/* Max mbufs processed in any service function invocation */
	uint32_t max_nb_rx;
	/* Receive queues that need to be polled */
	struct eth_rx_poll_entry *eth_rx_poll;
	/* Size of the eth_rx_poll array */
	uint16_t num_rx_polled;
	/* Weighted round robin schedule */
	uint32_t *wrr_sched;
	/* wrr_sched[] size */
	uint32_t wrr_len;
	/* Next entry in wrr[] to begin polling */
	uint32_t wrr_pos;
	/* Event burst buffer */
	struct eth_event_enqueue_buffer event_enqueue_buffer;
	/* Vector enable flag */
	uint8_t ena_vector;
	/* Timestamp of previous vector expiry list traversal */
	uint64_t prev_expiry_ts;
	/* Minimum ticks to wait before traversing expiry list */
	uint64_t vector_tmo_ticks;
	/* List of in-flight event vectors */
	struct eth_rx_vector_data_list vector_list;
	/* Per adapter stats */
	struct rte_event_eth_rx_adapter_stats stats;
	/* Block count, counts up to BLOCK_CNT_THRESHOLD */
	uint16_t enq_block_count;
	/* Block start timestamp */
	uint64_t rx_enq_block_start_ts;
	/* epoll fd used to wait for Rx interrupts */
	int epd;
	/* Number of interrupt driven Rx queues */
	uint32_t num_rx_intr;
	/* Used to send <dev id, queue id> of interrupting Rx queues from
	 * the interrupt thread to the Rx thread
	 */
	struct rte_ring *intr_ring;
	/* Rx Queue data (dev id, queue id) for the last non-empty
	 * queue polled
	 */
	union queue_data qd;
	/* queue_data is valid */
	int qd_valid;
	/* Interrupt ring lock, synchronizes Rx thread
	 * and interrupt thread
	 */
	rte_spinlock_t intr_ring_lock;
	/* event array passed to rte_poll_wait */
	struct rte_epoll_event *epoll_events;
	/* Count of interrupt vectors in use */
	uint32_t num_intr_vec;
	/* Thread blocked on Rx interrupts */
	pthread_t rx_intr_thread;
	/* Configuration callback for rte_service configuration */
	rte_event_eth_rx_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	void *conf_arg;
	/* Set if default_cb is being used */
	int default_cb_arg;
	/* Service initialization state */
	uint8_t service_inited;
	/* Total count of Rx queues in adapter */
	uint32_t nb_queues;
	/* Memory allocation name */
	char mem_name[ETH_RX_ADAPTER_MEM_NAME_LEN];
	/* Socket identifier cached from eventdev */
	int socket_id;
	/* Per adapter EAL service */
	uint32_t service_id;
	/* Adapter started flag */
	uint8_t rxa_started;
	/* Adapter ID */
	uint8_t id;
} __rte_cache_aligned;
/* Per eth device */
struct eth_device_info {
	struct rte_eth_dev *dev;
	struct eth_rx_queue_info *rx_queue;
	/* Rx callback */
	rte_event_eth_rx_adapter_cb_fn cb_fn;
	/* Rx callback argument */
	void *cb_arg;
	/* Set if ethdev->eventdev packet transfer uses a
	 * hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set if the adapter is processing rx queues for
	 * this eth device and packet processing has been
	 * started, allows for the code to know if the PMD
	 * rx_adapter_stop callback needs to be invoked
	 */
	uint8_t dev_rx_started;
	/* Number of queues added for this device */
	uint16_t nb_dev_queues;
	/* Number of poll based queues
	 * If nb_rx_poll > 0, the start callback will
	 * be invoked if not already invoked
	 */
	uint16_t nb_rx_poll;
	/* Number of interrupt based queues
	 * If nb_rx_intr > 0, the start callback will
	 * be invoked if not already invoked.
	 */
	uint16_t nb_rx_intr;
	/* Number of queues that use the shared interrupt */
	uint16_t nb_shared_intr;
	/* sum(wrr(q)) for all queues within the device
	 * useful when deleting all device queues
	 */
	uint32_t wrr_len;
	/* Intr based queue index to start polling from, this is used
	 * if the number of shared interrupts is non-zero
	 */
	uint16_t next_q_idx;
	/* Intr based queue indices */
	uint16_t *intr_queue;
	/* device generates per Rx queue interrupt for queue index
	 * for queue indices < RTE_MAX_RXTX_INTR_VEC_ID - 1
	 */
	int multi_intr_cap;
	/* shared interrupt enabled */
	int shared_intr_enabled;
};
/* Per Rx queue */
struct eth_rx_queue_info {
	int queue_enabled;	/* True if added */
	int intr_enabled;
	uint8_t ena_vector;
	uint16_t wt;		/* Polling weight */
	uint32_t flow_id_mask;	/* Set to ~0 if app provides flow id else 0 */
	uint64_t event;
	struct eth_rx_vector_data vector_data;
	struct eth_event_enqueue_buffer *event_buf;
	/* use adapter stats struct for queue level stats,
	 * as same stats need to be updated for adapter and queue
	 */
	struct rte_event_eth_rx_adapter_stats *stats;
};
static struct event_eth_rx_adapter **event_eth_rx_adapter;

/* Enable dynamic timestamp field in mbuf */
static uint64_t event_eth_rx_timestamp_dynflag;
static int event_eth_rx_timestamp_dynfield_offset = -1;
static inline rte_mbuf_timestamp_t *
rxa_timestamp_dynfield(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf,
		event_eth_rx_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
}
static inline int
rxa_validate_id(uint8_t id)
{
	return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
}
static inline struct eth_event_enqueue_buffer *
rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
		uint16_t rx_queue_id,
		struct rte_event_eth_rx_adapter_stats **stats)
{
	if (rx_adapter->use_queue_event_buf) {
		struct eth_device_info *dev_info =
			&rx_adapter->eth_devices[eth_dev_id];

		*stats = dev_info->rx_queue[rx_queue_id].stats;
		return dev_info->rx_queue[rx_queue_id].event_buf;
	}

	*stats = &rx_adapter->stats;
	return &rx_adapter->event_enqueue_buffer;
}
#define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
	if (!rxa_validate_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
		return retval; \
	} \
} while (0)
static inline int
rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
{
	return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
}
/* Greatest common divisor */
static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
{
	uint16_t r = a % b;

	return r ? rxa_gcd_u16(b, r) : b;
}
/* Returns the next queue in the polling sequence
 *
 * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
 */
static int
rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw,
	struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
	uint16_t gcd, int prev)
{
	int i = prev;
	uint16_t w;

	while (1) {
		uint16_t q;
		uint16_t d;

		i = (i + 1) % n;
		if (i == 0) {
			*cw = *cw - gcd;
			if (*cw <= 0)
				*cw = max_wt;
		}

		q = eth_rx_poll[i].eth_rx_qid;
		d = eth_rx_poll[i].eth_dev_id;
		w = rx_adapter->eth_devices[d].rx_queue[q].wt;

		if ((int)w >= *cw)
			return i;
	}
}
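
/*
 * Worked example (illustrative numbers, not from the source): three
 * polled queues with weights {5, 1, 1} give max_wt = 5 and gcd = 1, so
 * successive calls to rxa_wrr_next() produce the polling sequence
 * q0 q0 q0 q0 q0 q1 q2 before repeating, i.e. queue 0 is polled five
 * times for each poll of queues 1 and 2.
 */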
static int
rxa_shared_intr(struct eth_device_info *dev_info,
	int rx_queue_id)
{
	int multi_intr_cap;

	if (dev_info->dev->intr_handle == NULL)
		return 0;

	multi_intr_cap = rte_intr_cap_multiple(dev_info->dev->intr_handle);
	return !multi_intr_cap ||
		rx_queue_id >= RTE_MAX_RXTX_INTR_VEC_ID - 1;
}
static inline int
rxa_intr_queue(struct eth_device_info *dev_info,
	int rx_queue_id)
{
	struct eth_rx_queue_info *queue_info;

	queue_info = &dev_info->rx_queue[rx_queue_id];
	return dev_info->rx_queue &&
		!dev_info->internal_event_port &&
		queue_info->queue_enabled && queue_info->wt == 0;
}

static inline int
rxa_polled_queue(struct eth_device_info *dev_info,
	int rx_queue_id)
{
	struct eth_rx_queue_info *queue_info;

	queue_info = &dev_info->rx_queue[rx_queue_id];
	return !dev_info->internal_event_port &&
		dev_info->rx_queue &&
		queue_info->queue_enabled && queue_info->wt != 0;
}
/* Calculate change in number of vectors after Rx queue ID is added/deleted */
static int
rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
{
	uint16_t i;
	int n, s;
	uint16_t nbq;

	nbq = dev_info->dev->data->nb_rx_queues;
	n = 0; /* non shared count */
	s = 0; /* shared count */

	if (rx_queue_id == -1) {
		for (i = 0; i < nbq; i++) {
			if (!rxa_shared_intr(dev_info, i))
				n += add ? !rxa_intr_queue(dev_info, i) :
					rxa_intr_queue(dev_info, i);
			else
				s += add ? !rxa_intr_queue(dev_info, i) :
					rxa_intr_queue(dev_info, i);
		}

		if (s > 0) {
			if ((add && dev_info->nb_shared_intr == 0) ||
				(!add && dev_info->nb_shared_intr))
				n += 1;
		}
	} else {
		if (!rxa_shared_intr(dev_info, rx_queue_id))
			n = add ? !rxa_intr_queue(dev_info, rx_queue_id) :
				rxa_intr_queue(dev_info, rx_queue_id);
		else
			n = add ? !dev_info->nb_shared_intr :
				dev_info->nb_shared_intr == 1;
	}

	return add ? n : -n;
}
/* Calculate nb_rx_intr after deleting interrupt mode rx queues
 */
static void
rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter,
			struct eth_device_info *dev_info, int rx_queue_id,
			uint32_t *nb_rx_intr)
{
	uint32_t intr_diff;

	if (rx_queue_id == -1)
		intr_diff = dev_info->nb_rx_intr;
	else
		intr_diff = rxa_intr_queue(dev_info, rx_queue_id);

	*nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
}
/* Calculate nb_rx_* after adding interrupt mode rx queues, newly added
 * interrupt queues could currently be poll mode Rx queues
 */
static void
rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter,
			struct eth_device_info *dev_info, int rx_queue_id,
			uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
			uint32_t *nb_wrr)
{
	uint32_t intr_diff;
	uint32_t poll_diff;
	uint32_t wrr_len_diff;

	if (rx_queue_id == -1) {
		intr_diff = dev_info->dev->data->nb_rx_queues -
						dev_info->nb_rx_intr;
		poll_diff = dev_info->nb_rx_poll;
		wrr_len_diff = dev_info->wrr_len;
	} else {
		intr_diff = !rxa_intr_queue(dev_info, rx_queue_id);
		poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
		wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
					0;
	}

	*nb_rx_intr = rx_adapter->num_rx_intr + intr_diff;
	*nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
	*nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
}
/* Calculate size of the eth_rx_poll and wrr_sched arrays
 * after deleting poll mode rx queues
 */
static void
rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter,
			struct eth_device_info *dev_info, int rx_queue_id,
			uint32_t *nb_rx_poll, uint32_t *nb_wrr)
{
	uint32_t poll_diff;
	uint32_t wrr_len_diff;

	if (rx_queue_id == -1) {
		poll_diff = dev_info->nb_rx_poll;
		wrr_len_diff = dev_info->wrr_len;
	} else {
		poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
		wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
					0;
	}

	*nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
	*nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
}
/* Calculate nb_rx_* after adding poll mode rx queues
 */
static void
rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter,
			struct eth_device_info *dev_info, int rx_queue_id,
			uint16_t wt, uint32_t *nb_rx_poll,
			uint32_t *nb_rx_intr, uint32_t *nb_wrr)
{
	uint32_t intr_diff;
	uint32_t poll_diff;
	uint32_t wrr_len_diff;

	if (rx_queue_id == -1) {
		intr_diff = dev_info->nb_rx_intr;
		poll_diff = dev_info->dev->data->nb_rx_queues -
						dev_info->nb_rx_poll;
		wrr_len_diff = wt*dev_info->dev->data->nb_rx_queues
				- dev_info->wrr_len;
	} else {
		intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
		poll_diff = !rxa_polled_queue(dev_info, rx_queue_id);
		wrr_len_diff = rxa_polled_queue(dev_info, rx_queue_id) ?
				wt - dev_info->rx_queue[rx_queue_id].wt :
				wt;
	}

	*nb_rx_poll = rx_adapter->num_rx_polled + poll_diff;
	*nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
	*nb_wrr = rx_adapter->wrr_len + wrr_len_diff;
}
/* Calculate nb_rx_* after adding rx_queue_id */
static void
rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter,
		struct eth_device_info *dev_info, int rx_queue_id,
		uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
		uint32_t *nb_wrr)
{
	if (wt != 0)
		rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
					wt, nb_rx_poll, nb_rx_intr, nb_wrr);
	else
		rxa_calc_nb_post_add_intr(rx_adapter, dev_info, rx_queue_id,
					nb_rx_poll, nb_rx_intr, nb_wrr);
}
/* Calculate nb_rx_* after deleting rx_queue_id */
static void
rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter,
		struct eth_device_info *dev_info, int rx_queue_id,
		uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
		uint32_t *nb_wrr)
{
	rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
				nb_wrr);
	rxa_calc_nb_post_intr_del(rx_adapter, dev_info, rx_queue_id,
				nb_rx_intr);
}
/*
 * Allocate the rx_poll array
 */
static struct eth_rx_poll_entry *
rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled)
{
	size_t len;

	len = RTE_ALIGN(num_rx_polled * sizeof(*rx_adapter->eth_rx_poll),
			RTE_CACHE_LINE_SIZE);
	return rte_zmalloc_socket(rx_adapter->mem_name,
				len,
				RTE_CACHE_LINE_SIZE,
				rx_adapter->socket_id);
}
/*
 * Allocate the WRR array
 */
static uint32_t *
rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr)
{
	size_t len;

	len = RTE_ALIGN(nb_wrr * sizeof(*rx_adapter->wrr_sched),
			RTE_CACHE_LINE_SIZE);
	return rte_zmalloc_socket(rx_adapter->mem_name,
				len,
				RTE_CACHE_LINE_SIZE,
				rx_adapter->socket_id);
}
static int
rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll,
		uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll,
		uint32_t **wrr_sched)
{
	if (nb_poll == 0) {
		*rx_poll = NULL;
		*wrr_sched = NULL;
		return 0;
	}

	*rx_poll = rxa_alloc_poll(rx_adapter, nb_poll);
	if (*rx_poll == NULL) {
		*wrr_sched = NULL;
		return -ENOMEM;
	}

	*wrr_sched = rxa_alloc_wrr(rx_adapter, nb_wrr);
	if (*wrr_sched == NULL) {
		rte_free(*rx_poll);
		return -ENOMEM;
	}

	return 0;
}
/* Precalculate WRR polling sequence for all queues in rx_adapter */
static void
rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter,
		struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr)
{
	uint16_t d;
	uint16_t q;
	unsigned int i;
	int prev;
	int cw;

	/* Initialize variables for calculation of wrr schedule */
	uint16_t max_wrr_pos = 0;
	unsigned int poll_q = 0;
	uint16_t max_wt = 0;
	uint16_t gcd = 0;

	if (rx_poll == NULL)
		return;

	/* Generate array of all queues to poll, the size of this
	 * array is poll_q
	 */
	RTE_ETH_FOREACH_DEV(d) {
		uint16_t nb_rx_queues;
		struct eth_device_info *dev_info =
			&rx_adapter->eth_devices[d];

		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
		if (dev_info->rx_queue == NULL)
			continue;
		if (dev_info->internal_event_port)
			continue;
		dev_info->wrr_len = 0;
		for (q = 0; q < nb_rx_queues; q++) {
			struct eth_rx_queue_info *queue_info =
				&dev_info->rx_queue[q];
			uint16_t wt;

			if (!rxa_polled_queue(dev_info, q))
				continue;
			wt = queue_info->wt;
			rx_poll[poll_q].eth_dev_id = d;
			rx_poll[poll_q].eth_rx_qid = q;
			max_wrr_pos += wt;
			dev_info->wrr_len += wt;
			max_wt = RTE_MAX(max_wt, wt);
			gcd = (gcd) ? rxa_gcd_u16(gcd, wt) : wt;
			poll_q++;
		}
	}

	/* Generate polling sequence based on weights */
	prev = -1;
	cw = -1;
	for (i = 0; i < max_wrr_pos; i++) {
		rx_wrr[i] = rxa_wrr_next(rx_adapter, poll_q, &cw,
				rx_poll, max_wt, gcd, prev);
		prev = rx_wrr[i];
	}
}
static inline void
rxa_mtoip(struct rte_mbuf *m, struct rte_ipv4_hdr **ipv4_hdr,
	struct rte_ipv6_hdr **ipv6_hdr)
{
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	struct rte_vlan_hdr *vlan_hdr;

	*ipv4_hdr = NULL;
	*ipv6_hdr = NULL;

	switch (eth_hdr->ether_type) {
	case RTE_BE16(RTE_ETHER_TYPE_IPV4):
		*ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
		break;

	case RTE_BE16(RTE_ETHER_TYPE_IPV6):
		*ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
		break;

	case RTE_BE16(RTE_ETHER_TYPE_VLAN):
		vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
		switch (vlan_hdr->eth_proto) {
		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
			*ipv4_hdr = (struct rte_ipv4_hdr *)(vlan_hdr + 1);
			break;
		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
			*ipv6_hdr = (struct rte_ipv6_hdr *)(vlan_hdr + 1);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}
}
/* Calculate RSS hash for IPv4/6 */
static inline uint32_t
rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
{
	uint32_t input_len;
	void *tuple;
	struct rte_ipv4_tuple ipv4_tuple;
	struct rte_ipv6_tuple ipv6_tuple;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;

	rxa_mtoip(m, &ipv4_hdr, &ipv6_hdr);
	if (ipv4_hdr) {
		ipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);
		ipv4_tuple.dst_addr = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
		tuple = &ipv4_tuple;
		input_len = RTE_THASH_V4_L3_LEN;
	} else if (ipv6_hdr) {
		rte_thash_load_v6_addrs(ipv6_hdr,
					(union rte_thash_tuple *)&ipv6_tuple);
		tuple = &ipv6_tuple;
		input_len = RTE_THASH_V6_L3_LEN;
	} else
		return 0;

	return rte_softrss_be(tuple, input_len, rss_key_be);
}
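
/*
 * Illustrative sketch (assumption, not part of the original file):
 * rte_softrss_be() requires the 40-byte RSS key pre-converted with
 * rte_convert_rss_key(), which byte-swaps each 32-bit word of the key;
 * the adapter does this once in rxa_create() below. The demo_* name is
 * hypothetical.
 */
static inline void
demo_prepare_rss_key(const uint8_t *key, uint8_t *key_be)
{
	/* Length argument is in bytes; RSS_KEY_SIZE == 40 */
	rte_convert_rss_key((const uint32_t *)key, (uint32_t *)key_be,
			RSS_KEY_SIZE);
}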
static inline int
rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter)
{
	return !!rx_adapter->enq_block_count;
}

static inline void
rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter)
{
	if (rx_adapter->rx_enq_block_start_ts)
		return;

	rx_adapter->enq_block_count++;
	if (rx_adapter->enq_block_count < BLOCK_CNT_THRESHOLD)
		return;

	rx_adapter->rx_enq_block_start_ts = rte_get_tsc_cycles();
}
static inline void
rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
		struct rte_event_eth_rx_adapter_stats *stats)
{
	if (unlikely(!stats->rx_enq_start_ts))
		stats->rx_enq_start_ts = rte_get_tsc_cycles();

	if (likely(!rxa_enq_blocked(rx_adapter)))
		return;

	rx_adapter->enq_block_count = 0;
	if (rx_adapter->rx_enq_block_start_ts) {
		stats->rx_enq_end_ts = rte_get_tsc_cycles();
		stats->rx_enq_block_cycles += stats->rx_enq_end_ts -
		    rx_adapter->rx_enq_block_start_ts;
		rx_adapter->rx_enq_block_start_ts = 0;
	}
}
/* Enqueue buffered events to event device */
static inline uint16_t
rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter,
		struct eth_event_enqueue_buffer *buf,
		struct rte_event_eth_rx_adapter_stats *stats)
{
	uint16_t count = buf->count;
	uint16_t n = 0;

	if (!count)
		return 0;

	if (buf->last)
		count = buf->last - buf->head;

	n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
					rx_adapter->event_port_id,
					&buf->events[buf->head],
					count);
	if (n != count)
		stats->rx_enq_retry++;

	buf->head += n;

	if (buf->last && n == count) {
		uint16_t n1;

		n1 = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
					rx_adapter->event_port_id,
					&buf->events[0],
					buf->tail);
		if (n1 != buf->tail)
			stats->rx_enq_retry++;

		buf->last = 0;
		buf->head = n1;
		buf->last_mask = 0;
		n += n1;
	}

	n ? rxa_enq_block_end_ts(rx_adapter, stats) :
		rxa_enq_block_start_ts(rx_adapter);

	buf->count -= n;
	stats->rx_enq_count += n;

	return n;
}
static inline void
rxa_init_vector(struct event_eth_rx_adapter *rx_adapter,
		struct eth_rx_vector_data *vec)
{
	vec->vector_ev->nb_elem = 0;
	vec->vector_ev->port = vec->port;
	vec->vector_ev->queue = vec->queue;
	vec->vector_ev->attr_valid = true;
	TAILQ_INSERT_TAIL(&rx_adapter->vector_list, vec, next);
}
static inline uint16_t
rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
			struct eth_rx_queue_info *queue_info,
			struct eth_event_enqueue_buffer *buf,
			struct rte_mbuf **mbufs, uint16_t num)
{
	struct rte_event *ev = &buf->events[buf->count];
	struct eth_rx_vector_data *vec;
	uint16_t filled, space, sz;

	filled = 0;
	vec = &queue_info->vector_data;

	while (num) {
		if (vec->vector_ev == NULL) {
			if (rte_mempool_get(vec->vector_pool,
					(void **)&vec->vector_ev) < 0) {
				rte_pktmbuf_free_bulk(mbufs, num);
				return filled;
			}
			rxa_init_vector(rx_adapter, vec);
		}

		if (vec->vector_ev->nb_elem == vec->max_vector_count) {
			/* Event ready. */
			ev->event = vec->event;
			ev->vec = vec->vector_ev;
			ev++;
			filled++;
			vec->vector_ev = NULL;
			TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
			if (rte_mempool_get(vec->vector_pool,
					(void **)&vec->vector_ev) < 0) {
				rte_pktmbuf_free_bulk(mbufs, num);
				return filled;
			}
			rxa_init_vector(rx_adapter, vec);
		}

		space = vec->max_vector_count - vec->vector_ev->nb_elem;
		sz = num > space ? space : num;
		memcpy(vec->vector_ev->mbufs + vec->vector_ev->nb_elem, mbufs,
			sizeof(void *) * sz);
		vec->vector_ev->nb_elem += sz;
		num -= sz;
		mbufs += sz;
		vec->ts = rte_rdtsc();
	}

	if (vec->vector_ev->nb_elem == vec->max_vector_count) {
		ev->event = vec->event;
		ev->vec = vec->vector_ev;
		ev++;
		filled++;
		vec->vector_ev = NULL;
		TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
	}

	return filled;
}
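
/*
 * Sketch (assumption, illustration only): sizing the mempool that backs
 * the event vectors above. Each element must hold the rte_event_vector
 * header plus max_vector_count mbuf pointers;
 * rte_event_vector_pool_create() performs that sizing internally. The
 * demo_* name and the pool size of 4096 are hypothetical.
 */
static struct rte_mempool *
demo_vector_pool(const char *name, uint16_t vector_sz, int socket_id)
{
	/* 4096 vectors, no per-lcore cache */
	return rte_event_vector_pool_create(name, 4096, 0, vector_sz,
					socket_id);
}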
static inline void
rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
		uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num,
		struct eth_event_enqueue_buffer *buf,
		struct rte_event_eth_rx_adapter_stats *stats)
{
	uint32_t i;
	struct eth_device_info *dev_info =
		&rx_adapter->eth_devices[eth_dev_id];
	struct eth_rx_queue_info *eth_rx_queue_info =
		&dev_info->rx_queue[rx_queue_id];
	uint16_t new_tail = buf->tail;
	uint64_t event = eth_rx_queue_info->event;
	uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
	struct rte_mbuf *m = mbufs[0];
	uint32_t rss_mask;
	uint32_t rss;
	int do_rss;
	uint16_t nb_cb;
	uint16_t dropped;
	uint64_t ts, ts_mask;

	if (!eth_rx_queue_info->ena_vector) {
		ts = m->ol_flags & event_eth_rx_timestamp_dynflag ?
						0 : rte_get_tsc_cycles();

		/* 0xffff ffff ffff ffff if RTE_MBUF_F_RX_TIMESTAMP is set,
		 * otherwise 0
		 */
		ts_mask = (uint64_t)(!(m->ol_flags &
				event_eth_rx_timestamp_dynflag)) - 1ULL;

		/* 0xffff ffff if RTE_MBUF_F_RX_RSS_HASH is set, otherwise 0 */
		rss_mask = ~(((m->ol_flags & RTE_MBUF_F_RX_RSS_HASH) != 0) - 1);
		do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
		for (i = 0; i < num; i++) {
			struct rte_event *ev;

			m = mbufs[i];
			*rxa_timestamp_dynfield(m) = ts |
					(*rxa_timestamp_dynfield(m) & ts_mask);

			ev = &buf->events[new_tail];

			rss = do_rss ? rxa_do_softrss(m, rx_adapter->rss_key_be)
				: m->hash.rss;
			ev->event = event;
			ev->flow_id = (rss & ~flow_id_mask) |
					(ev->flow_id & flow_id_mask);
			ev->mbuf = m;
			new_tail++;
		}
	} else {
		num = rxa_create_event_vector(rx_adapter, eth_rx_queue_info,
					buf, mbufs, num);
	}

	if (num && dev_info->cb_fn) {

		dropped = 0;
		nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id,
				buf->last |
				(buf->events_size & ~buf->last_mask),
				buf->count >= BATCH_SIZE ?
					buf->count - BATCH_SIZE : 0,
				&buf->events[buf->tail],
				num,
				dev_info->cb_arg,
				&dropped);
		if (unlikely(nb_cb > num))
			RTE_EDEV_LOG_ERR("Rx CB returned %d (> %d) events",
				nb_cb, num);
		else
			num = nb_cb;
		if (dropped)
			stats->rx_dropped += dropped;
	}

	buf->count += num;
	buf->tail += num;
}
static inline bool
rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf)
{
	uint32_t nb_req = buf->tail + BATCH_SIZE;

	if (!buf->last) {
		if (nb_req <= buf->events_size)
			return true;

		if (buf->head >= BATCH_SIZE) {
			buf->last_mask = ~0;
			buf->last = buf->tail;
			buf->tail = 0;
			return true;
		}
	}

	return nb_req <= buf->head;
}
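
/*
 * Worked example (hypothetical numbers, with BATCH_SIZE of 32): given
 * events_size = 192, tail = 176 and head = 48, a fresh batch does not
 * fit at the tail (176 + 32 > 192); since head >= BATCH_SIZE the buffer
 * rolls over: last = 176 records the wrap point, tail restarts at 0, and
 * rxa_flush_event_buffer() later drains head..last followed by 0..tail.
 */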
/* Enqueue packets from <port, q> to event buffer */
static inline uint32_t
rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
	uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
	int *rxq_empty, struct eth_event_enqueue_buffer *buf,
	struct rte_event_eth_rx_adapter_stats *stats)
{
	struct rte_mbuf *mbufs[BATCH_SIZE];
	uint16_t n;
	uint32_t nb_rx = 0;
	uint32_t nb_flushed = 0;

	if (rxq_empty)
		*rxq_empty = 0;

	/* Don't do a batch dequeue from the rx queue if there isn't
	 * enough space in the enqueue buffer.
	 */
	while (rxa_pkt_buf_available(buf)) {
		if (buf->count >= BATCH_SIZE)
			nb_flushed +=
				rxa_flush_event_buffer(rx_adapter, buf, stats);

		stats->rx_poll_count++;
		n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
		if (unlikely(!n)) {
			if (rxq_empty)
				*rxq_empty = 1;
			break;
		}
		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf,
				stats);
		nb_rx += n;
		if (rx_count + nb_rx > max_rx)
			break;
	}

	if (buf->count > 0)
		nb_flushed += rxa_flush_event_buffer(rx_adapter, buf, stats);

	stats->rx_packets += nb_rx;
	if (nb_flushed == 0)
		rte_event_maintain(rx_adapter->eventdev_id,
				rx_adapter->event_port_id, 0);

	return nb_rx;
}
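
/*
 * Note (added for clarity): rte_event_maintain() is needed by event
 * devices that lack RTE_EVENT_DEV_CAP_MAINTENANCE_FREE and is a no-op
 * otherwise, so calling it when nothing was flushed on this pass is
 * always safe.
 */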
static void
rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data)
{
	uint16_t port_id;
	uint16_t queue;
	int err;
	union queue_data qd;
	struct eth_device_info *dev_info;
	struct eth_rx_queue_info *queue_info;
	int *intr_enabled;

	qd.ptr = data;
	port_id = qd.port;
	queue = qd.queue;

	dev_info = &rx_adapter->eth_devices[port_id];
	queue_info = &dev_info->rx_queue[queue];
	rte_spinlock_lock(&rx_adapter->intr_ring_lock);
	if (rxa_shared_intr(dev_info, queue))
		intr_enabled = &dev_info->shared_intr_enabled;
	else
		intr_enabled = &queue_info->intr_enabled;

	if (*intr_enabled) {
		*intr_enabled = 0;
		err = rte_ring_enqueue(rx_adapter->intr_ring, data);
		/* Entry should always be available.
		 * The ring size equals the maximum number of interrupt
		 * vectors supported (an interrupt vector is shared in
		 * case of shared interrupts)
		 */
		if (err)
			RTE_EDEV_LOG_ERR("Failed to enqueue interrupt"
				" to ring: %s", strerror(-err));
		else
			rte_eth_dev_rx_intr_disable(port_id, queue);
	}
	rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
}
static int
rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter,
			uint32_t num_intr_vec)
{
	if (rx_adapter->num_intr_vec + num_intr_vec >
				RTE_EVENT_ETH_INTR_RING_SIZE) {
		RTE_EDEV_LOG_ERR("Exceeded intr ring slots current"
			" %d needed %d limit %d", rx_adapter->num_intr_vec,
			num_intr_vec, RTE_EVENT_ETH_INTR_RING_SIZE);
		return -ENOSPC;
	}

	return 0;
}
/* Delete entries for (dev, queue) from the interrupt ring */
static void
rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter,
			struct eth_device_info *dev_info,
			uint16_t rx_queue_id)
{
	int i, n;
	union queue_data qd;

	rte_spinlock_lock(&rx_adapter->intr_ring_lock);

	n = rte_ring_count(rx_adapter->intr_ring);
	for (i = 0; i < n; i++) {
		rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
		if (!rxa_shared_intr(dev_info, rx_queue_id)) {
			if (qd.port == dev_info->dev->data->port_id &&
				qd.queue == rx_queue_id)
				continue;
		} else {
			if (qd.port == dev_info->dev->data->port_id)
				continue;
		}
		rte_ring_enqueue(rx_adapter->intr_ring, qd.ptr);
	}

	rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
}
/* pthread callback handling interrupt mode receive queues
 * After receiving an Rx interrupt, it enqueues the port id and queue id of the
 * interrupting queue to the adapter's ring buffer for interrupt events.
 * These events are picked up by rxa_intr_ring_dequeue() which is invoked from
 * the adapter service function.
 */
static void *
rxa_intr_thread(void *arg)
{
	struct event_eth_rx_adapter *rx_adapter = arg;
	struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
	int n, i;

	while (1) {
		n = rte_epoll_wait(rx_adapter->epd, epoll_events,
				RTE_EVENT_ETH_INTR_RING_SIZE, -1);
		if (unlikely(n < 0))
			RTE_EDEV_LOG_ERR("rte_epoll_wait returned error %d",
					n);
		for (i = 0; i < n; i++) {
			rxa_intr_ring_enqueue(rx_adapter,
					epoll_events[i].epdata.data);
		}
	}

	return NULL;
}
/* Dequeue <port, q> from interrupt ring and enqueue received
 * mbufs to the event device
 */
static inline void
rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
{
	uint32_t n;
	uint32_t nb_rx = 0;
	int rxq_empty;
	struct eth_event_enqueue_buffer *buf;
	struct rte_event_eth_rx_adapter_stats *stats;
	rte_spinlock_t *ring_lock;
	uint8_t max_done = 0;

	if (rx_adapter->num_rx_intr == 0)
		return;

	if (rte_ring_count(rx_adapter->intr_ring) == 0
		&& !rx_adapter->qd_valid)
		return;

	buf = &rx_adapter->event_enqueue_buffer;
	stats = &rx_adapter->stats;
	ring_lock = &rx_adapter->intr_ring_lock;

	if (buf->count >= BATCH_SIZE)
		rxa_flush_event_buffer(rx_adapter, buf, stats);

	while (rxa_pkt_buf_available(buf)) {
		struct eth_device_info *dev_info;
		uint16_t port;
		uint16_t queue;
		union queue_data qd = rx_adapter->qd;
		int err;

		if (!rx_adapter->qd_valid) {
			struct eth_rx_queue_info *queue_info;

			rte_spinlock_lock(ring_lock);
			err = rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
			if (err) {
				rte_spinlock_unlock(ring_lock);
				break;
			}

			port = qd.port;
			queue = qd.queue;
			rx_adapter->qd = qd;
			rx_adapter->qd_valid = 1;
			dev_info = &rx_adapter->eth_devices[port];
			if (rxa_shared_intr(dev_info, queue))
				dev_info->shared_intr_enabled = 1;
			else {
				queue_info = &dev_info->rx_queue[queue];
				queue_info->intr_enabled = 1;
			}
			rte_eth_dev_rx_intr_enable(port, queue);
			rte_spinlock_unlock(ring_lock);
		} else {
			port = qd.port;
			queue = qd.queue;

			dev_info = &rx_adapter->eth_devices[port];
		}

		if (rxa_shared_intr(dev_info, queue)) {
			uint16_t i;
			uint16_t nb_queues;

			nb_queues = dev_info->dev->data->nb_rx_queues;
			n = 0;
			for (i = dev_info->next_q_idx; i < nb_queues; i++) {
				uint8_t enq_buffer_full;

				if (!rxa_intr_queue(dev_info, i))
					continue;
				n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
					rx_adapter->max_nb_rx,
					&rxq_empty, buf, stats);
				nb_rx += n;

				enq_buffer_full = !rxq_empty && n == 0;
				max_done = nb_rx > rx_adapter->max_nb_rx;

				if (enq_buffer_full || max_done) {
					dev_info->next_q_idx = i;
					goto done;
				}
			}

			rx_adapter->qd_valid = 0;

			/* Reinitialize for next interrupt */
			dev_info->next_q_idx = dev_info->multi_intr_cap ?
						RTE_MAX_RXTX_INTR_VEC_ID - 1 :
						0;
		} else {
			n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
				rx_adapter->max_nb_rx,
				&rxq_empty, buf, stats);
			rx_adapter->qd_valid = !rxq_empty;
			nb_rx += n;
			if (nb_rx > rx_adapter->max_nb_rx)
				break;
		}
	}

done:
	rx_adapter->stats.rx_intr_packets += nb_rx;
}
/*
 * Polls receive queues added to the event adapter and enqueues received
 * packets to the event device.
 *
 * The receive code enqueues initially to a temporary buffer, the
 * temporary buffer is drained anytime it holds >= BATCH_SIZE packets
 *
 * If there isn't space available in the temporary buffer, packets from the
 * Rx queue aren't dequeued from the eth device, this back pressures the
 * eth device, in virtual device environments this back pressure is relayed to
 * the hypervisor's switching layer where adjustments can be made to deal with
 * it.
 */
static inline void
rxa_poll(struct event_eth_rx_adapter *rx_adapter)
{
	uint32_t num_queue;
	uint32_t nb_rx = 0;
	struct eth_event_enqueue_buffer *buf = NULL;
	struct rte_event_eth_rx_adapter_stats *stats = NULL;
	uint32_t wrr_pos;
	uint32_t max_nb_rx;

	wrr_pos = rx_adapter->wrr_pos;
	max_nb_rx = rx_adapter->max_nb_rx;

	/* Iterate through a WRR sequence */
	for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
		unsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];
		uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
		uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;

		buf = rxa_event_buf_get(rx_adapter, d, qid, &stats);

		/* Don't do a batch dequeue from the rx queue if there isn't
		 * enough space in the enqueue buffer.
		 */
		if (buf->count >= BATCH_SIZE)
			rxa_flush_event_buffer(rx_adapter, buf, stats);
		if (!rxa_pkt_buf_available(buf)) {
			if (rx_adapter->use_queue_event_buf)
				goto poll_next_entry;
			else {
				rx_adapter->wrr_pos = wrr_pos;
				return;
			}
		}

		nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
				NULL, buf, stats);
		if (nb_rx > max_nb_rx) {
			rx_adapter->wrr_pos =
				(wrr_pos + 1) % rx_adapter->wrr_len;
			break;
		}

poll_next_entry:
		if (++wrr_pos == rx_adapter->wrr_len)
			wrr_pos = 0;
	}
}
static void
rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
{
	struct event_eth_rx_adapter *rx_adapter = arg;
	struct eth_event_enqueue_buffer *buf = NULL;
	struct rte_event_eth_rx_adapter_stats *stats = NULL;
	struct rte_event *ev;

	buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue, &stats);

	if (buf->count)
		rxa_flush_event_buffer(rx_adapter, buf, stats);

	if (vec->vector_ev->nb_elem == 0)
		return;
	ev = &buf->events[buf->count];

	/* Event ready. */
	ev->event = vec->event;
	ev->vec = vec->vector_ev;
	buf->count++;

	vec->vector_ev = NULL;
	vec->ts = 0;
}
static int
rxa_service_func(void *args)
{
	struct event_eth_rx_adapter *rx_adapter = args;

	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
		return 0;
	if (!rx_adapter->rxa_started) {
		rte_spinlock_unlock(&rx_adapter->rx_lock);
		return 0;
	}

	if (rx_adapter->ena_vector) {
		if ((rte_rdtsc() - rx_adapter->prev_expiry_ts) >=
		    rx_adapter->vector_tmo_ticks) {
			struct eth_rx_vector_data *vec;

			TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {
				uint64_t elapsed_time = rte_rdtsc() - vec->ts;

				if (elapsed_time >= vec->vector_timeout_ticks) {
					rxa_vector_expire(vec, rx_adapter);
					TAILQ_REMOVE(&rx_adapter->vector_list,
						vec, next);
				}
			}
			rx_adapter->prev_expiry_ts = rte_rdtsc();
		}
	}

	rxa_intr_ring_dequeue(rx_adapter);
	rxa_poll(rx_adapter);

	rte_spinlock_unlock(&rx_adapter->rx_lock);

	return 0;
}
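
/*
 * Usage sketch (application side, assumption, not part of this file):
 * when the adapter uses the service-core model, the application must map
 * the adapter's service to a service lcore for rxa_service_func() to
 * run. The demo_* name is hypothetical; error handling is abbreviated.
 */
static int
demo_map_rxa_service(uint8_t rxa_id, uint32_t lcore_id)
{
	uint32_t service_id;

	if (rte_event_eth_rx_adapter_service_id_get(rxa_id, &service_id))
		return -1;	/* internal port adapter, no service used */

	rte_service_lcore_add(lcore_id);
	rte_service_map_lcore_set(service_id, lcore_id, 1);
	rte_service_runstate_set(service_id, 1);
	return rte_service_lcore_start(lcore_id);
}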
int
rte_event_eth_rx_adapter_init(void)
{
	const char *name = RXA_ADAPTER_ARRAY;
	const struct rte_memzone *mz;
	unsigned int sz;

	sz = sizeof(*event_eth_rx_adapter) *
	    RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
						RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
					PRId32, rte_errno);
			return -rte_errno;
		}
	}

	event_eth_rx_adapter = mz->addr;
	return 0;
}
static int
rxa_memzone_lookup(void)
{
	const struct rte_memzone *mz;

	if (event_eth_rx_adapter == NULL) {
		mz = rte_memzone_lookup(RXA_ADAPTER_ARRAY);
		if (mz == NULL)
			return -ENOMEM;
		event_eth_rx_adapter = mz->addr;
	}

	return 0;
}
static inline struct event_eth_rx_adapter *
rxa_id_to_adapter(uint8_t id)
{
	return event_eth_rx_adapter ?
		event_eth_rx_adapter[id] : NULL;
}
static int
rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
		struct rte_event_eth_rx_adapter_conf *conf, void *arg)
{
	int ret;
	struct rte_eventdev *dev;
	struct rte_event_dev_config dev_conf;
	int started;
	uint8_t port_id;
	struct rte_event_port_conf *port_conf = arg;
	struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);

	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);
	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;
	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to configure event dev %u\n",
						dev_id);
		if (started) {
			if (rte_event_dev_start(dev_id))
				return -EIO;
		}
		return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
					port_id);
		return ret;
	}

	conf->event_port_id = port_id;
	conf->max_nb_rx = 128;
	if (started)
		ret = rte_event_dev_start(dev_id);
	rx_adapter->default_cb_arg = 1;
	return ret;
}
static int
rxa_epoll_create1(void)
{
	int fd;

	fd = epoll_create1(EPOLL_CLOEXEC);
	return fd < 0 ? -errno : fd;
}
static int
rxa_init_epd(struct event_eth_rx_adapter *rx_adapter)
{
	if (rx_adapter->epd != INIT_FD)
		return 0;

	rx_adapter->epd = rxa_epoll_create1();
	if (rx_adapter->epd < 0) {
		int err = rx_adapter->epd;

		rx_adapter->epd = INIT_FD;
		RTE_EDEV_LOG_ERR("epoll_create1() failed, err %d", err);
		return err;
	}

	return 0;
}
static int
rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
{
	int err;
	char thread_name[RTE_MAX_THREAD_NAME_LEN];

	if (rx_adapter->intr_ring)
		return 0;

	rx_adapter->intr_ring = rte_ring_create("intr_ring",
					RTE_EVENT_ETH_INTR_RING_SIZE,
					rte_socket_id(), 0);
	if (!rx_adapter->intr_ring)
		return -ENOMEM;

	rx_adapter->epoll_events = rte_zmalloc_socket(rx_adapter->mem_name,
					RTE_EVENT_ETH_INTR_RING_SIZE *
					sizeof(struct rte_epoll_event),
					RTE_CACHE_LINE_SIZE,
					rx_adapter->socket_id);
	if (!rx_adapter->epoll_events) {
		err = -ENOMEM;
		goto error;
	}

	rte_spinlock_init(&rx_adapter->intr_ring_lock);

	snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
			"rx-intr-thread-%d", rx_adapter->id);

	err = rte_ctrl_thread_create(&rx_adapter->rx_intr_thread, thread_name,
				NULL, rxa_intr_thread, rx_adapter);
	if (!err)
		return 0;

	RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d\n", err);
	rte_free(rx_adapter->epoll_events);
error:
	rte_ring_free(rx_adapter->intr_ring);
	rx_adapter->intr_ring = NULL;
	rx_adapter->epoll_events = NULL;
	return err;
}
static int
rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
{
	int err;

	err = pthread_cancel(rx_adapter->rx_intr_thread);
	if (err)
		RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d\n",
				err);

	err = pthread_join(rx_adapter->rx_intr_thread, NULL);
	if (err)
		RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d\n", err);

	rte_free(rx_adapter->epoll_events);
	rte_ring_free(rx_adapter->intr_ring);
	rx_adapter->intr_ring = NULL;
	rx_adapter->epoll_events = NULL;
	return 0;
}
static int
rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter)
{
	int ret;

	if (rx_adapter->num_rx_intr == 0)
		return 0;

	ret = rxa_destroy_intr_thread(rx_adapter);
	if (ret)
		return ret;

	close(rx_adapter->epd);
	rx_adapter->epd = INIT_FD;

	return ret;
}
static int
rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter,
	struct eth_device_info *dev_info, uint16_t rx_queue_id)
{
	int err;
	uint16_t eth_dev_id = dev_info->dev->data->port_id;
	int sintr = rxa_shared_intr(dev_info, rx_queue_id);

	err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
	if (err) {
		RTE_EDEV_LOG_ERR("Could not disable interrupt for Rx queue %u",
			rx_queue_id);
		return err;
	}

	err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
					rx_adapter->epd,
					RTE_INTR_EVENT_DEL,
					0);
	if (err)
		RTE_EDEV_LOG_ERR("Interrupt event deletion failed %d", err);

	if (!sintr)
		dev_info->rx_queue[rx_queue_id].intr_enabled = 0;
	else
		dev_info->shared_intr_enabled = 0;
	return err;
}
static int
rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
		struct eth_device_info *dev_info, int rx_queue_id)
{
	int err;
	int i;
	int s;

	if (dev_info->nb_rx_intr == 0)
		return 0;

	err = 0;
	if (rx_queue_id == -1) {
		s = dev_info->nb_shared_intr;
		for (i = 0; i < dev_info->nb_rx_intr; i++) {
			int sintr;
			uint16_t q;

			q = dev_info->intr_queue[i];
			sintr = rxa_shared_intr(dev_info, q);
			s -= sintr;

			if (!sintr || s == 0) {

				err = rxa_disable_intr(rx_adapter, dev_info,
						q);
				if (err)
					return err;
				rxa_intr_ring_del_entries(rx_adapter, dev_info,
							q);
			}
		}
	} else {
		if (!rxa_intr_queue(dev_info, rx_queue_id))
			return 0;
		if (!rxa_shared_intr(dev_info, rx_queue_id) ||
				dev_info->nb_shared_intr == 1) {
			err = rxa_disable_intr(rx_adapter, dev_info,
					rx_queue_id);
			if (err)
				return err;
			rxa_intr_ring_del_entries(rx_adapter, dev_info,
						rx_queue_id);
		}

		for (i = 0; i < dev_info->nb_rx_intr; i++) {
			if (dev_info->intr_queue[i] == rx_queue_id) {
				for (; i < dev_info->nb_rx_intr - 1; i++)
					dev_info->intr_queue[i] =
						dev_info->intr_queue[i + 1];
				break;
			}
		}
	}

	return err;
}
static int
rxa_config_intr(struct event_eth_rx_adapter *rx_adapter,
	struct eth_device_info *dev_info, uint16_t rx_queue_id)
{
	int err, err1;
	uint16_t eth_dev_id = dev_info->dev->data->port_id;
	union queue_data qd;
	int init_fd;
	uint16_t *intr_queue;
	int sintr = rxa_shared_intr(dev_info, rx_queue_id);

	if (rxa_intr_queue(dev_info, rx_queue_id))
		return 0;

	intr_queue = dev_info->intr_queue;
	if (dev_info->intr_queue == NULL) {
		size_t len =
			dev_info->dev->data->nb_rx_queues * sizeof(uint16_t);
		dev_info->intr_queue =
			rte_zmalloc_socket(
				rx_adapter->mem_name,
				len,
				0,
				rx_adapter->socket_id);
		if (dev_info->intr_queue == NULL)
			return -ENOMEM;
	}

	init_fd = rx_adapter->epd;
	err = rxa_init_epd(rx_adapter);
	if (err)
		goto err_free_queue;

	qd.port = eth_dev_id;
	qd.queue = rx_queue_id;

	err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
					rx_adapter->epd,
					RTE_INTR_EVENT_ADD,
					qd.ptr);
	if (err) {
		RTE_EDEV_LOG_ERR("Failed to add interrupt event for"
			" Rx Queue %u err %d", rx_queue_id, err);
		goto err_del_fd;
	}

	err = rte_eth_dev_rx_intr_enable(eth_dev_id, rx_queue_id);
	if (err) {
		RTE_EDEV_LOG_ERR("Could not enable interrupt for"
				" Rx Queue %u err %d", rx_queue_id, err);
		goto err_del_event;
	}

	err = rxa_create_intr_thread(rx_adapter);
	if (!err) {
		if (sintr)
			dev_info->shared_intr_enabled = 1;
		else
			dev_info->rx_queue[rx_queue_id].intr_enabled = 1;
		return 0;
	}

	err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
	if (err)
		RTE_EDEV_LOG_ERR("Could not disable interrupt for"
				" Rx Queue %u err %d", rx_queue_id, err);
err_del_event:
	err1 = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
					rx_adapter->epd,
					RTE_INTR_EVENT_DEL,
					0);
	if (err1) {
		RTE_EDEV_LOG_ERR("Could not delete event for"
				" Rx Queue %u err %d", rx_queue_id, err1);
	}

err_del_fd:
	if (init_fd == INIT_FD) {
		close(rx_adapter->epd);
		rx_adapter->epd = -1;
	}

err_free_queue:
	if (intr_queue == NULL)
		rte_free(dev_info->intr_queue);

	return err;
}
static int
rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
	struct eth_device_info *dev_info, int rx_queue_id)
{
	int i, j, err;
	int si = -1;
	int shared_done = (dev_info->nb_shared_intr > 0);

	if (rx_queue_id != -1) {
		if (rxa_shared_intr(dev_info, rx_queue_id) && shared_done)
			return 0;
		return rxa_config_intr(rx_adapter, dev_info, rx_queue_id);
	}

	err = 0;
	for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++) {

		if (rxa_shared_intr(dev_info, i) && shared_done)
			continue;

		err = rxa_config_intr(rx_adapter, dev_info, i);
		if (err)
			goto rollback;

		shared_done = err == 0 && rxa_shared_intr(dev_info, i);
		if (shared_done) {
			si = i;
			dev_info->shared_intr_enabled = 1;
		}
	}

	return 0;

rollback:
	shared_done = (dev_info->nb_shared_intr > 0);
	for (j = 0; j < i; j++) {
		if (rxa_intr_queue(dev_info, j))
			continue;
		if (rxa_shared_intr(dev_info, j) && si != j)
			continue;
		err = rxa_disable_intr(rx_adapter, dev_info, j);
		if (err)
			break;

	}

	return err;
}
static int
rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
{
	int ret;
	struct rte_service_spec service;
	struct rte_event_eth_rx_adapter_conf rx_adapter_conf;

	if (rx_adapter->service_inited)
		return 0;

	memset(&service, 0, sizeof(service));
	snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN,
		"rte_event_eth_rx_adapter_%d", id);
	service.socket_id = rx_adapter->socket_id;
	service.callback = rxa_service_func;
	service.callback_userdata = rx_adapter;
	/* Service function handles locking for queue add/del updates */
	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
	ret = rte_service_component_register(&service, &rx_adapter->service_id);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
			service.name, ret);
		return ret;
	}

	ret = rx_adapter->conf_cb(id, rx_adapter->eventdev_id,
		&rx_adapter_conf, rx_adapter->conf_arg);
	if (ret) {
		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
			ret);
		goto err_done;
	}
	rx_adapter->event_port_id = rx_adapter_conf.event_port_id;
	rx_adapter->max_nb_rx = rx_adapter_conf.max_nb_rx;
	rx_adapter->service_inited = 1;
	rx_adapter->epd = INIT_FD;
	return 0;

err_done:
	rte_service_component_unregister(rx_adapter->service_id);
	return ret;
}
static void
rxa_update_queue(struct event_eth_rx_adapter *rx_adapter,
		struct eth_device_info *dev_info, int32_t rx_queue_id,
		uint8_t add)
{
	struct eth_rx_queue_info *queue_info;
	int enabled;
	uint16_t i;

	if (dev_info->rx_queue == NULL)
		return;

	if (rx_queue_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
			rxa_update_queue(rx_adapter, dev_info, i, add);
	} else {
		queue_info = &dev_info->rx_queue[rx_queue_id];
		enabled = queue_info->queue_enabled;
		if (add) {
			rx_adapter->nb_queues += !enabled;
			dev_info->nb_dev_queues += !enabled;
		} else {
			rx_adapter->nb_queues -= enabled;
			dev_info->nb_dev_queues -= enabled;
		}
		queue_info->queue_enabled = !!add;
	}
}
static void
rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
		uint64_t vector_ns, struct rte_mempool *mp, uint32_t qid,
		uint16_t port_id)
{
#define NSEC2TICK(__ns, __freq) (((__ns) * (__freq)) / 1E9)
	struct eth_rx_vector_data *vector_data;
	uint32_t flow_id;

	vector_data = &queue_info->vector_data;
	vector_data->max_vector_count = vector_count;
	vector_data->port = port_id;
	vector_data->queue = qid;
	vector_data->vector_pool = mp;
	vector_data->vector_timeout_ticks =
		NSEC2TICK(vector_ns, rte_get_timer_hz());
	vector_data->ts = 0;
	flow_id = queue_info->event & 0xFFFFF;
	flow_id =
		flow_id == 0 ? (qid & 0xFFF) | (port_id & 0xFF) << 12 : flow_id;
	vector_data->event = (queue_info->event & ~0xFFFFF) | flow_id;
}
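
/*
 * Worked example for NSEC2TICK (hypothetical clock rate): with
 * rte_get_timer_hz() returning 2E9 (a 2 GHz timer), a vector_ns of 1E5
 * (the MIN_VECTOR_NS of 100 us) converts to (1E5 * 2E9) / 1E9 = 200000
 * timeout ticks.
 */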
static void
rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
	struct eth_device_info *dev_info, int32_t rx_queue_id)
{
	struct eth_rx_vector_data *vec;
	int pollq;
	int intrq;
	int sintrq;

	if (rx_adapter->nb_queues == 0)
		return;

	if (rx_queue_id == -1) {
		uint16_t nb_rx_queues;
		uint16_t i;

		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
		for (i = 0; i < nb_rx_queues; i++)
			rxa_sw_del(rx_adapter, dev_info, i);
		return;
	}

	/* Push all the partial event vectors to event device. */
	TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {
		if (vec->queue != rx_queue_id)
			continue;
		rxa_vector_expire(vec, rx_adapter);
		TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
	}

	pollq = rxa_polled_queue(dev_info, rx_queue_id);
	intrq = rxa_intr_queue(dev_info, rx_queue_id);
	sintrq = rxa_shared_intr(dev_info, rx_queue_id);
	rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 0);
	rx_adapter->num_rx_polled -= pollq;
	dev_info->nb_rx_poll -= pollq;
	rx_adapter->num_rx_intr -= intrq;
	dev_info->nb_rx_intr -= intrq;
	dev_info->nb_shared_intr -= intrq && sintrq;
	if (rx_adapter->use_queue_event_buf) {
		struct eth_event_enqueue_buffer *event_buf =
			dev_info->rx_queue[rx_queue_id].event_buf;
		struct rte_event_eth_rx_adapter_stats *stats =
			dev_info->rx_queue[rx_queue_id].stats;
		rte_free(event_buf->events);
		rte_free(event_buf);
		rte_free(stats);
		dev_info->rx_queue[rx_queue_id].event_buf = NULL;
		dev_info->rx_queue[rx_queue_id].stats = NULL;
	}
}
static int
rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
	struct eth_device_info *dev_info, int32_t rx_queue_id,
	const struct rte_event_eth_rx_adapter_queue_conf *conf)
{
	struct eth_rx_queue_info *queue_info;
	const struct rte_event *ev = &conf->ev;
	int pollq;
	int intrq;
	int sintrq;
	struct rte_event *qi_ev;
	struct eth_event_enqueue_buffer *new_rx_buf = NULL;
	struct rte_event_eth_rx_adapter_stats *stats = NULL;
	uint16_t eth_dev_id = dev_info->dev->data->port_id;
	int ret;

	if (rx_queue_id == -1) {
		uint16_t nb_rx_queues;
		uint16_t i;

		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
		for (i = 0; i < nb_rx_queues; i++) {
			ret = rxa_add_queue(rx_adapter, dev_info, i, conf);
			if (ret)
				return ret;
		}
		return 0;
	}

	pollq = rxa_polled_queue(dev_info, rx_queue_id);
	intrq = rxa_intr_queue(dev_info, rx_queue_id);
	sintrq = rxa_shared_intr(dev_info, rx_queue_id);

	queue_info = &dev_info->rx_queue[rx_queue_id];
	queue_info->wt = conf->servicing_weight;

	qi_ev = (struct rte_event *)&queue_info->event;
	qi_ev->event = ev->event;
	qi_ev->op = RTE_EVENT_OP_NEW;
	qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
	qi_ev->sub_event_type = 0;

	if (conf->rx_queue_flags &
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {
		queue_info->flow_id_mask = ~0;
	} else
		qi_ev->flow_id = 0;

	if (conf->rx_queue_flags &
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
		queue_info->ena_vector = 1;
		qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
		rxa_set_vector_data(queue_info, conf->vector_sz,
				conf->vector_timeout_ns, conf->vector_mp,
				rx_queue_id, dev_info->dev->data->port_id);
		rx_adapter->ena_vector = 1;
		rx_adapter->vector_tmo_ticks =
			rx_adapter->vector_tmo_ticks ?
				RTE_MIN(queue_info->vector_data
						.vector_timeout_ticks >>
						1,
					rx_adapter->vector_tmo_ticks) :
				queue_info->vector_data.vector_timeout_ticks >>
					1;
	}

	rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
	if (rxa_polled_queue(dev_info, rx_queue_id)) {
		rx_adapter->num_rx_polled += !pollq;
		dev_info->nb_rx_poll += !pollq;
		rx_adapter->num_rx_intr -= intrq;
		dev_info->nb_rx_intr -= intrq;
		dev_info->nb_shared_intr -= intrq && sintrq;
	}

	if (rxa_intr_queue(dev_info, rx_queue_id)) {
		rx_adapter->num_rx_polled -= pollq;
		dev_info->nb_rx_poll -= pollq;
		rx_adapter->num_rx_intr += !intrq;
		dev_info->nb_rx_intr += !intrq;
		dev_info->nb_shared_intr += !intrq && sintrq;
		if (dev_info->nb_shared_intr == 1) {
			if (dev_info->multi_intr_cap)
				dev_info->next_q_idx =
					RTE_MAX_RXTX_INTR_VEC_ID - 1;
			else
				dev_info->next_q_idx = 0;
		}
	}

	if (!rx_adapter->use_queue_event_buf)
		return 0;

	new_rx_buf = rte_zmalloc_socket("rx_buffer_meta",
				sizeof(*new_rx_buf), 0,
				rte_eth_dev_socket_id(eth_dev_id));
	if (new_rx_buf == NULL) {
		RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for "
				"dev_id: %d queue_id: %d",
				eth_dev_id, rx_queue_id);
		return -ENOMEM;
	}

	new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE);
	new_rx_buf->events_size += (2 * BATCH_SIZE);
	new_rx_buf->events = rte_zmalloc_socket("rx_buffer",
				sizeof(struct rte_event) *
				new_rx_buf->events_size, 0,
				rte_eth_dev_socket_id(eth_dev_id));
	if (new_rx_buf->events == NULL) {
		rte_free(new_rx_buf);
		RTE_EDEV_LOG_ERR("Failed to allocate event buffer for "
				"dev_id: %d queue_id: %d",
				eth_dev_id, rx_queue_id);
		return -ENOMEM;
	}

	queue_info->event_buf = new_rx_buf;

	/* Allocate storage for adapter queue stats */
	stats = rte_zmalloc_socket("rx_queue_stats",
				sizeof(*stats), 0,
				rte_eth_dev_socket_id(eth_dev_id));
	if (stats == NULL) {
		rte_free(new_rx_buf->events);
		rte_free(new_rx_buf);
		RTE_EDEV_LOG_ERR("Failed to allocate stats storage for"
				" dev_id: %d queue_id: %d",
				eth_dev_id, rx_queue_id);
		return -ENOMEM;
	}

	queue_info->stats = stats;

	return 0;
}
static int
rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
		int rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
	struct rte_event_eth_rx_adapter_queue_conf temp_conf;
	int ret;
	struct eth_rx_poll_entry *rx_poll;
	struct eth_rx_queue_info *rx_queue;
	uint32_t *rx_wrr;
	uint16_t nb_rx_queues;
	uint32_t nb_rx_poll, nb_wrr;
	uint32_t nb_rx_intr;
	int num_intr_vec;
	uint16_t wt;

	if (queue_conf->servicing_weight == 0) {
		struct rte_eth_dev_data *data = dev_info->dev->data;

		temp_conf = *queue_conf;
		if (!data->dev_conf.intr_conf.rxq) {
			/* If Rx interrupts are disabled set wt = 1 */
			temp_conf.servicing_weight = 1;
		}
		queue_conf = &temp_conf;
	}

	if (queue_conf->servicing_weight == 0 &&
			rx_adapter->use_queue_event_buf) {

		RTE_EDEV_LOG_ERR("Use of queue level event buffer "
				"not supported for interrupt queues "
				"dev_id: %d queue_id: %d",
				eth_dev_id, rx_queue_id);
		return -EINVAL;
	}

	nb_rx_queues = dev_info->dev->data->nb_rx_queues;
	rx_queue = dev_info->rx_queue;
	wt = queue_conf->servicing_weight;

	if (dev_info->rx_queue == NULL) {
		dev_info->rx_queue =
		    rte_zmalloc_socket(rx_adapter->mem_name,
				nb_rx_queues *
				sizeof(struct eth_rx_queue_info), 0,
				rx_adapter->socket_id);
		if (dev_info->rx_queue == NULL)
			return -ENOMEM;
	}

	rx_wrr = NULL;
	rx_poll = NULL;

	rxa_calc_nb_post_add(rx_adapter, dev_info, rx_queue_id,
			queue_conf->servicing_weight,
			&nb_rx_poll, &nb_rx_intr, &nb_wrr);

	if (dev_info->dev->intr_handle)
		dev_info->multi_intr_cap =
			rte_intr_cap_multiple(dev_info->dev->intr_handle);

	ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
				&rx_poll, &rx_wrr);
	if (ret)
		goto err_free_rxqueue;

	if (wt == 0) {
		num_intr_vec = rxa_nb_intr_vect(dev_info, rx_queue_id, 1);

		ret = rxa_intr_ring_check_avail(rx_adapter, num_intr_vec);
		if (ret)
			goto err_free_rxqueue;

		ret = rxa_add_intr_queue(rx_adapter, dev_info, rx_queue_id);
		if (ret)
			goto err_free_rxqueue;
	} else {

		num_intr_vec = 0;
		if (rx_adapter->num_rx_intr > nb_rx_intr) {
			num_intr_vec = rxa_nb_intr_vect(dev_info,
						rx_queue_id, 0);
			/* interrupt based queues are being converted to
			 * poll mode queues, delete the interrupt configuration
			 * for those queues
			 */
			ret = rxa_del_intr_queue(rx_adapter,
						dev_info, rx_queue_id);
			if (ret)
				goto err_free_rxqueue;
		}
	}

	if (nb_rx_intr == 0) {
		ret = rxa_free_intr_resources(rx_adapter);
		if (ret)
			goto err_free_rxqueue;
	}

	if (wt == 0) {
		uint16_t i;

		if (rx_queue_id == -1) {
			for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
				dev_info->intr_queue[i] = i;
		} else {
			if (!rxa_intr_queue(dev_info, rx_queue_id))
				dev_info->intr_queue[nb_rx_intr - 1] =
					rx_queue_id;
		}
	}

	ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
	if (ret)
		goto err_free_rxqueue;
	rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);

	rte_free(rx_adapter->eth_rx_poll);
	rte_free(rx_adapter->wrr_sched);

	rx_adapter->eth_rx_poll = rx_poll;
	rx_adapter->wrr_sched = rx_wrr;
	rx_adapter->wrr_len = nb_wrr;
	rx_adapter->num_intr_vec += num_intr_vec;
	return 0;

err_free_rxqueue:
	if (rx_queue == NULL) {
		rte_free(dev_info->rx_queue);
		dev_info->rx_queue = NULL;
	}

	rte_free(rx_poll);
	rte_free(rx_wrr);

	return ret;
}
static int
rxa_ctrl(uint8_t id, int start)
{
	struct event_eth_rx_adapter *rx_adapter;
	struct rte_eventdev *dev;
	struct eth_device_info *dev_info;
	uint32_t i;
	int use_service = 0;
	int stop = !start;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	rx_adapter = rxa_id_to_adapter(id);
	if (rx_adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[rx_adapter->eventdev_id];

	RTE_ETH_FOREACH_DEV(i) {
		dev_info = &rx_adapter->eth_devices[i];
		/* if start check for num dev queues */
		if (start && !dev_info->nb_dev_queues)
			continue;
		/* if stop check if dev has been started */
		if (stop && !dev_info->dev_rx_started)
			continue;
		use_service |= !dev_info->internal_event_port;
		dev_info->dev_rx_started = start;
		if (dev_info->internal_event_port == 0)
			continue;
		start ? (*dev->dev_ops->eth_rx_adapter_start)(dev,
						&rte_eth_devices[i]) :
			(*dev->dev_ops->eth_rx_adapter_stop)(dev,
						&rte_eth_devices[i]);
	}

	if (use_service) {
		rte_spinlock_lock(&rx_adapter->rx_lock);
		rx_adapter->rxa_started = start;
		rte_service_runstate_set(rx_adapter->service_id, start);
		rte_spinlock_unlock(&rx_adapter->rx_lock);
	}

	return 0;
}
static int
rxa_create(uint8_t id, uint8_t dev_id,
	struct rte_event_eth_rx_adapter_params *rxa_params,
	rte_event_eth_rx_adapter_conf_cb conf_cb,
	void *conf_arg)
{
	struct event_eth_rx_adapter *rx_adapter;
	struct eth_event_enqueue_buffer *buf;
	struct rte_event *events;
	int ret;
	int socket_id;
	uint16_t i;
	char mem_name[ETH_RX_ADAPTER_SERVICE_NAME_LEN];
	const uint8_t default_rss_key[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
	};

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	if (conf_cb == NULL)
		return -EINVAL;

	if (event_eth_rx_adapter == NULL) {
		ret = rte_event_eth_rx_adapter_init();
		if (ret)
			return ret;
	}

	rx_adapter = rxa_id_to_adapter(id);
	if (rx_adapter != NULL) {
		RTE_EDEV_LOG_ERR("Eth Rx adapter exists id = %" PRIu8, id);
		return -EEXIST;
	}

	socket_id = rte_event_dev_socket_id(dev_id);
	snprintf(mem_name, ETH_RX_ADAPTER_MEM_NAME_LEN,
		"rte_event_eth_rx_adapter_%d",
		id);

	rx_adapter = rte_zmalloc_socket(mem_name, sizeof(*rx_adapter),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (rx_adapter == NULL) {
		RTE_EDEV_LOG_ERR("failed to get mem for rx adapter");
		return -ENOMEM;
	}

	rx_adapter->eventdev_id = dev_id;
	rx_adapter->socket_id = socket_id;
	rx_adapter->conf_cb = conf_cb;
	rx_adapter->conf_arg = conf_arg;
	rx_adapter->id = id;
	TAILQ_INIT(&rx_adapter->vector_list);
	strcpy(rx_adapter->mem_name, mem_name);
	rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
					RTE_MAX_ETHPORTS *
					sizeof(struct eth_device_info), 0,
					socket_id);
	rte_convert_rss_key((const uint32_t *)default_rss_key,
			(uint32_t *)rx_adapter->rss_key_be,
			RTE_DIM(default_rss_key));

	if (rx_adapter->eth_devices == NULL) {
		RTE_EDEV_LOG_ERR("failed to get mem for eth devices\n");
		rte_free(rx_adapter);
		return -ENOMEM;
	}

	rte_spinlock_init(&rx_adapter->rx_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];

	/* Rx adapter event buffer allocation */
	rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf;

	if (!rx_adapter->use_queue_event_buf) {
		buf = &rx_adapter->event_enqueue_buffer;
		buf->events_size = rxa_params->event_buf_size;

		events = rte_zmalloc_socket(rx_adapter->mem_name,
				buf->events_size * sizeof(*events),
				0, socket_id);
		if (events == NULL) {
			RTE_EDEV_LOG_ERR("Failed to allocate memory "
					"for adapter event buffer");
			rte_free(rx_adapter->eth_devices);
			rte_free(rx_adapter);
			return -ENOMEM;
		}

		rx_adapter->event_enqueue_buffer.events = events;
	}

	event_eth_rx_adapter[id] = rx_adapter;

	if (conf_cb == rxa_default_conf_cb)
		rx_adapter->default_cb_arg = 1;

	if (rte_mbuf_dyn_rx_timestamp_register(
			&event_eth_rx_timestamp_dynfield_offset,
			&event_eth_rx_timestamp_dynflag) != 0) {
		RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf\n");
		return -rte_errno;
	}

	rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb,
		conf_arg);
	return 0;
}
int
rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
				rte_event_eth_rx_adapter_conf_cb conf_cb,
				void *conf_arg)
{
	struct rte_event_eth_rx_adapter_params rxa_params = {0};

	/* use default values for adapter params */
	rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE;
	rxa_params.use_queue_event_buf = false;

	return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg);
}
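
/*
 * Usage sketch (assumption, not part of this file): a custom
 * configuration callback for the _ext create variant that hands back a
 * pre-configured event port instead of letting the adapter reconfigure
 * the event device. demo_conf_cb and the port choice are hypothetical.
 */
static int
demo_conf_cb(uint8_t id, uint8_t dev_id,
	struct rte_event_eth_rx_adapter_conf *conf, void *arg)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev_id);
	RTE_SET_USED(arg);

	conf->event_port_id = 0;	/* port set up before adapter start */
	conf->max_nb_rx = 128;
	return 0;
}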
int
rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id,
			struct rte_event_port_conf *port_config,
			struct rte_event_eth_rx_adapter_params *rxa_params)
{
	struct rte_event_port_conf *pc;
	int ret;
	struct rte_event_eth_rx_adapter_params temp_params = {0};

	if (port_config == NULL)
		return -EINVAL;

	if (rxa_params == NULL) {
		/* use default values if rxa_params is NULL */
		rxa_params = &temp_params;
		rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE;
		rxa_params->use_queue_event_buf = false;
	} else if ((!rxa_params->use_queue_event_buf &&
		    rxa_params->event_buf_size == 0) ||
		   (rxa_params->use_queue_event_buf &&
		    rxa_params->event_buf_size != 0)) {
		RTE_EDEV_LOG_ERR("Invalid adapter params\n");
		return -EINVAL;
	} else if (!rxa_params->use_queue_event_buf) {
		/* adjust event buffer size with BATCH_SIZE used for fetching
		 * packets from NIC rx queues to get full buffer utilization
		 * and prevent unnecessary rollovers.
		 */

		rxa_params->event_buf_size =
			RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE);
		rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE);
	}

	pc = rte_malloc(NULL, sizeof(*pc), 0);
	if (pc == NULL)
		return -ENOMEM;

	*pc = *port_config;

	ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc);
	if (ret)
		rte_free(pc);

	return ret;
}
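
/*
 * Worked example for the size adjustment above (with BATCH_SIZE of 32):
 * a requested event_buf_size of 1000 is aligned up to 1024 and then
 * grown by 2 * 32 to 1088 events, so a full Rx burst always fits at the
 * tail without an early rollover.
 */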
int
rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
		struct rte_event_port_conf *port_config)
{
	struct rte_event_port_conf *pc;
	int ret;

	if (port_config == NULL)
		return -EINVAL;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	pc = rte_malloc(NULL, sizeof(*pc), 0);
	if (pc == NULL)
		return -ENOMEM;
	*pc = *port_config;

	ret = rte_event_eth_rx_adapter_create_ext(id, dev_id,
					rxa_default_conf_cb,
					pc);
	if (ret)
		rte_free(pc);
	return ret;
}
2501 struct event_eth_rx_adapter *rx_adapter;
2503 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2505 rx_adapter = rxa_id_to_adapter(id);
2506 if (rx_adapter == NULL)
2509 if (rx_adapter->nb_queues) {
2510 RTE_EDEV_LOG_ERR("%" PRIu16 " Rx queues not deleted",
2511 rx_adapter->nb_queues);
2515 if (rx_adapter->default_cb_arg)
2516 rte_free(rx_adapter->conf_arg);
2517 rte_free(rx_adapter->eth_devices);
2518 if (!rx_adapter->use_queue_event_buf)
2519 rte_free(rx_adapter->event_enqueue_buffer.events);
2520 rte_free(rx_adapter);
2521 event_eth_rx_adapter[id] = NULL;
2523 rte_eventdev_trace_eth_rx_adapter_free(id);
2528 rte_event_eth_rx_adapter_queue_add(uint8_t id,
2529 uint16_t eth_dev_id,
2530 int32_t rx_queue_id,
2531 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2535 struct event_eth_rx_adapter *rx_adapter;
2536 struct rte_eventdev *dev;
2537 struct eth_device_info *dev_info;
2538 struct rte_event_eth_rx_adapter_vector_limits limits;
2540 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2541 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
2543 rx_adapter = rxa_id_to_adapter(id);
2544 if ((rx_adapter == NULL) || (queue_conf == NULL))
2547 dev = &rte_eventdevs[rx_adapter->eventdev_id];
2548 ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
2552 RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
2553 "eth port %" PRIu16, id, eth_dev_id);
2557 if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) == 0
2558 && (queue_conf->rx_queue_flags &
2559 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
2560 RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
2561 " eth port: %" PRIu16 " adapter id: %" PRIu8,
2566 if (queue_conf->rx_queue_flags &
2567 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
2569 if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0) {
2570 RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
2571 " eth port: %" PRIu16
2572 " adapter id: %" PRIu8,
2577 ret = rte_event_eth_rx_adapter_vector_limits_get(
2578 rx_adapter->eventdev_id, eth_dev_id, &limits);
2580 RTE_EDEV_LOG_ERR("Failed to get event device vector limits,"
2581 " eth port: %" PRIu16
2582 " adapter id: %" PRIu8,
2586 if (queue_conf->vector_sz < limits.min_sz ||
2587 queue_conf->vector_sz > limits.max_sz ||
2588 queue_conf->vector_timeout_ns < limits.min_timeout_ns ||
2589 queue_conf->vector_timeout_ns > limits.max_timeout_ns ||
2590 queue_conf->vector_mp == NULL) {
2591 RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
2592 " eth port: %" PRIu16
2593 " adapter id: %" PRIu8,
2597 if (queue_conf->vector_mp->elt_size <
2598 (sizeof(struct rte_event_vector) +
2599 (sizeof(uintptr_t) * queue_conf->vector_sz))) {
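			/*
			 * Example (illustrative): with vector_sz = 64 on a
			 * 64-bit target, each mempool element must hold at
			 * least sizeof(struct rte_event_vector) + 64 * 8
			 * bytes; pools created with
			 * rte_event_vector_pool_create() are sized to
			 * satisfy this.
			 */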
2600 RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
2601 " eth port: %" PRIu16
2602 " adapter id: %" PRIu8,
2608 if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
2609 (rx_queue_id != -1)) {
2610 RTE_EDEV_LOG_ERR("Rx queues can only be connected to single "
2611 "event queue, eth port: %" PRIu16 " adapter id: %"
2612 PRIu8, eth_dev_id, id);
2616 if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
2617 rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
2618 RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
2619 (uint16_t)rx_queue_id);
2623 if ((rx_adapter->use_queue_event_buf &&
2624 queue_conf->event_buf_size == 0) ||
2625 (!rx_adapter->use_queue_event_buf &&
2626 queue_conf->event_buf_size != 0)) {
2627 RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue");
2631 dev_info = &rx_adapter->eth_devices[eth_dev_id];
2633 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
2634 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_add,
2636 if (dev_info->rx_queue == NULL) {
2637 dev_info->rx_queue =
2638 rte_zmalloc_socket(rx_adapter->mem_name,
2639 dev_info->dev->data->nb_rx_queues *
2640 sizeof(struct eth_rx_queue_info), 0,
2641 rx_adapter->socket_id);
2642 if (dev_info->rx_queue == NULL)
2646 ret = (*dev->dev_ops->eth_rx_adapter_queue_add)(dev,
2647 &rte_eth_devices[eth_dev_id],
2648 rx_queue_id, queue_conf);
2650 dev_info->internal_event_port = 1;
2651 rxa_update_queue(rx_adapter,
2652 &rx_adapter->eth_devices[eth_dev_id],
2657 rte_spinlock_lock(&rx_adapter->rx_lock);
2658 dev_info->internal_event_port = 0;
2659 ret = rxa_init_service(rx_adapter, id);
2661 uint32_t service_id = rx_adapter->service_id;
2662 ret = rxa_sw_add(rx_adapter, eth_dev_id, rx_queue_id,
2664 rte_service_component_runstate_set(service_id,
2665 rxa_sw_adapter_queue_count(rx_adapter));
2667 rte_spinlock_unlock(&rx_adapter->rx_lock);
2670 rte_eventdev_trace_eth_rx_adapter_queue_add(id, eth_dev_id,
2671 rx_queue_id, queue_conf, ret);
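
/*
 * Usage sketch (illustrative): connect all Rx queues of an eth port to one
 * event queue with equal servicing weight; "ev_queue" and "eth_port" are
 * placeholders set up by the application.
 *
 *	struct rte_event_eth_rx_adapter_queue_conf qconf;
 *
 *	memset(&qconf, 0, sizeof(qconf));
 *	qconf.ev.queue_id = ev_queue;
 *	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	qconf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *	qconf.servicing_weight = 1;
 *
 *	ret = rte_event_eth_rx_adapter_queue_add(id, eth_port, -1, &qconf);
 */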
2679 rxa_sw_vector_limits(struct rte_event_eth_rx_adapter_vector_limits *limits)
2681 limits->max_sz = MAX_VECTOR_SIZE;
2682 limits->min_sz = MIN_VECTOR_SIZE;
2683 limits->max_timeout_ns = MAX_VECTOR_NS;
2684 limits->min_timeout_ns = MIN_VECTOR_NS;
2690 rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
2691 int32_t rx_queue_id)
2694 struct rte_eventdev *dev;
2695 struct event_eth_rx_adapter *rx_adapter;
2696 struct eth_device_info *dev_info;
2698 uint32_t nb_rx_poll = 0;
2699 uint32_t nb_wrr = 0;
2700 uint32_t nb_rx_intr;
2701 struct eth_rx_poll_entry *rx_poll = NULL;
2702 uint32_t *rx_wrr = NULL;
2705 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2706 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
2708 rx_adapter = rxa_id_to_adapter(id);
2709 if (rx_adapter == NULL)
2712 dev = &rte_eventdevs[rx_adapter->eventdev_id];
2713 ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
2719 if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
2720 rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
2721 RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
2722 (uint16_t)rx_queue_id);
2726 dev_info = &rx_adapter->eth_devices[eth_dev_id];
2728 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
2729 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_del,
2731 ret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev,
2732 &rte_eth_devices[eth_dev_id],
2735 rxa_update_queue(rx_adapter,
2736 &rx_adapter->eth_devices[eth_dev_id],
2739 if (dev_info->nb_dev_queues == 0) {
2740 rte_free(dev_info->rx_queue);
2741 dev_info->rx_queue = NULL;
2745 rxa_calc_nb_post_del(rx_adapter, dev_info, rx_queue_id,
2746 &nb_rx_poll, &nb_rx_intr, &nb_wrr);
2748 ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
2753 rte_spinlock_lock(&rx_adapter->rx_lock);
2756 if (rx_adapter->num_rx_intr > nb_rx_intr) {
2758 num_intr_vec = rxa_nb_intr_vect(dev_info,
2760 ret = rxa_del_intr_queue(rx_adapter, dev_info,
2766 if (nb_rx_intr == 0) {
2767 ret = rxa_free_intr_resources(rx_adapter);
2772 rxa_sw_del(rx_adapter, dev_info, rx_queue_id);
2773 rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
2775 rte_free(rx_adapter->eth_rx_poll);
2776 rte_free(rx_adapter->wrr_sched);
2778 if (nb_rx_intr == 0) {
2779 rte_free(dev_info->intr_queue);
2780 dev_info->intr_queue = NULL;
2783 rx_adapter->eth_rx_poll = rx_poll;
2784 rx_adapter->wrr_sched = rx_wrr;
2785 rx_adapter->wrr_len = nb_wrr;
	 * Reset the next poll start position (wrr_pos) to avoid reading
	 * past the end of wrr_sched[] when wrr_len shrinks on queue delete.
2790 rx_adapter->wrr_pos = 0;
2791 rx_adapter->num_intr_vec += num_intr_vec;
2793 if (dev_info->nb_dev_queues == 0) {
2794 rte_free(dev_info->rx_queue);
2795 dev_info->rx_queue = NULL;
2798 rte_spinlock_unlock(&rx_adapter->rx_lock);
2805 rte_service_component_runstate_set(rx_adapter->service_id,
2806 rxa_sw_adapter_queue_count(rx_adapter));
2809 rte_eventdev_trace_eth_rx_adapter_queue_del(id, eth_dev_id,
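
/*
 * Usage sketch (illustrative): detach every Rx queue of an eth port in one
 * call; once no queues remain, the adapter itself can be freed.
 *
 *	ret = rte_event_eth_rx_adapter_queue_del(id, eth_port, -1);
 *	if (ret == 0)
 *		ret = rte_event_eth_rx_adapter_free(id);
 */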
2815 rte_event_eth_rx_adapter_vector_limits_get(
2816 uint8_t dev_id, uint16_t eth_port_id,
2817 struct rte_event_eth_rx_adapter_vector_limits *limits)
2819 struct rte_eventdev *dev;
2823 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
2824 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
2829 dev = &rte_eventdevs[dev_id];
2831 ret = rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &cap);
2833 RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
2834 "eth port %" PRIu16,
2835 dev_id, eth_port_id);
2839 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
2840 RTE_FUNC_PTR_OR_ERR_RET(
2841 *dev->dev_ops->eth_rx_adapter_vector_limits_get,
2843 ret = dev->dev_ops->eth_rx_adapter_vector_limits_get(
2844 dev, &rte_eth_devices[eth_port_id], limits);
2846 ret = rxa_sw_vector_limits(limits);
2853 rte_event_eth_rx_adapter_start(uint8_t id)
2855 rte_eventdev_trace_eth_rx_adapter_start(id);
2856 return rxa_ctrl(id, 1);
2860 rte_event_eth_rx_adapter_stop(uint8_t id)
2862 rte_eventdev_trace_eth_rx_adapter_stop(id);
2863 return rxa_ctrl(id, 0);
2867 rxa_queue_stats_reset(struct eth_rx_queue_info *queue_info)
2869 struct rte_event_eth_rx_adapter_stats *q_stats;
2871 q_stats = queue_info->stats;
2872 memset(q_stats, 0, sizeof(*q_stats));
2876 rte_event_eth_rx_adapter_stats_get(uint8_t id,
2877 struct rte_event_eth_rx_adapter_stats *stats)
2879 struct event_eth_rx_adapter *rx_adapter;
2880 struct eth_event_enqueue_buffer *buf;
2881 struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
2882 struct rte_event_eth_rx_adapter_stats dev_stats;
2883 struct rte_eventdev *dev;
2884 struct eth_device_info *dev_info;
2885 struct eth_rx_queue_info *queue_info;
2886 struct rte_event_eth_rx_adapter_stats *q_stats;
2890 if (rxa_memzone_lookup())
2893 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2895 rx_adapter = rxa_id_to_adapter(id);
2896 if (rx_adapter == NULL || stats == NULL)
2899 dev = &rte_eventdevs[rx_adapter->eventdev_id];
2900 memset(stats, 0, sizeof(*stats));
2902 if (rx_adapter->service_inited)
2903 *stats = rx_adapter->stats;
2905 RTE_ETH_FOREACH_DEV(i) {
2906 dev_info = &rx_adapter->eth_devices[i];
2908 if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) {
2910 for (j = 0; j < dev_info->dev->data->nb_rx_queues;
2912 queue_info = &dev_info->rx_queue[j];
2913 if (!queue_info->queue_enabled)
2915 q_stats = queue_info->stats;
2917 stats->rx_packets += q_stats->rx_packets;
2918 stats->rx_poll_count += q_stats->rx_poll_count;
2919 stats->rx_enq_count += q_stats->rx_enq_count;
2920 stats->rx_enq_retry += q_stats->rx_enq_retry;
2921 stats->rx_dropped += q_stats->rx_dropped;
2922 stats->rx_enq_block_cycles +=
2923 q_stats->rx_enq_block_cycles;
2927 if (dev_info->internal_event_port == 0 ||
2928 dev->dev_ops->eth_rx_adapter_stats_get == NULL)
2930 ret = (*dev->dev_ops->eth_rx_adapter_stats_get)(dev,
2931 &rte_eth_devices[i],
2935 dev_stats_sum.rx_packets += dev_stats.rx_packets;
2936 dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
2939 buf = &rx_adapter->event_enqueue_buffer;
2940 stats->rx_packets += dev_stats_sum.rx_packets;
2941 stats->rx_enq_count += dev_stats_sum.rx_enq_count;
2942 stats->rx_event_buf_count = buf->count;
2943 stats->rx_event_buf_size = buf->events_size;
2949 rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
2950 uint16_t eth_dev_id,
2951 uint16_t rx_queue_id,
2952 struct rte_event_eth_rx_adapter_queue_stats *stats)
2954 struct event_eth_rx_adapter *rx_adapter;
2955 struct eth_device_info *dev_info;
2956 struct eth_rx_queue_info *queue_info;
2957 struct eth_event_enqueue_buffer *event_buf;
2958 struct rte_event_eth_rx_adapter_stats *q_stats;
2959 struct rte_eventdev *dev;
2961 if (rxa_memzone_lookup())
2964 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2965 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
2967 rx_adapter = rxa_id_to_adapter(id);
2969 if (rx_adapter == NULL || stats == NULL)
2972 if (!rx_adapter->use_queue_event_buf)
2975 if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
2976 RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
2980 dev_info = &rx_adapter->eth_devices[eth_dev_id];
2981 if (dev_info->rx_queue == NULL ||
2982 !dev_info->rx_queue[rx_queue_id].queue_enabled) {
2983 RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
2987 if (dev_info->internal_event_port == 0) {
2988 queue_info = &dev_info->rx_queue[rx_queue_id];
2989 event_buf = queue_info->event_buf;
2990 q_stats = queue_info->stats;
2992 stats->rx_event_buf_count = event_buf->count;
2993 stats->rx_event_buf_size = event_buf->events_size;
2994 stats->rx_packets = q_stats->rx_packets;
2995 stats->rx_poll_count = q_stats->rx_poll_count;
2996 stats->rx_dropped = q_stats->rx_dropped;
2999 dev = &rte_eventdevs[rx_adapter->eventdev_id];
3000 if (dev->dev_ops->eth_rx_adapter_queue_stats_get != NULL) {
3001 return (*dev->dev_ops->eth_rx_adapter_queue_stats_get)(dev,
3002 &rte_eth_devices[eth_dev_id],
3003 rx_queue_id, stats);
3010 rte_event_eth_rx_adapter_stats_reset(uint8_t id)
3012 struct event_eth_rx_adapter *rx_adapter;
3013 struct rte_eventdev *dev;
3014 struct eth_device_info *dev_info;
3015 struct eth_rx_queue_info *queue_info;
3018 if (rxa_memzone_lookup())
3021 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3023 rx_adapter = rxa_id_to_adapter(id);
3024 if (rx_adapter == NULL)
3027 dev = &rte_eventdevs[rx_adapter->eventdev_id];
3029 RTE_ETH_FOREACH_DEV(i) {
3030 dev_info = &rx_adapter->eth_devices[i];
3032 if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) {
3034 for (j = 0; j < dev_info->dev->data->nb_rx_queues;
3036 queue_info = &dev_info->rx_queue[j];
3037 if (!queue_info->queue_enabled)
3039 rxa_queue_stats_reset(queue_info);
3043 if (dev_info->internal_event_port == 0 ||
3044 dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
3046 (*dev->dev_ops->eth_rx_adapter_stats_reset)(dev,
3047 &rte_eth_devices[i]);
3050 memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
3056 rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
3057 uint16_t eth_dev_id,
3058 uint16_t rx_queue_id)
3060 struct event_eth_rx_adapter *rx_adapter;
3061 struct eth_device_info *dev_info;
3062 struct eth_rx_queue_info *queue_info;
3063 struct rte_eventdev *dev;
3065 if (rxa_memzone_lookup())
3068 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3069 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
3071 rx_adapter = rxa_id_to_adapter(id);
3072 if (rx_adapter == NULL)
3075 if (!rx_adapter->use_queue_event_buf)
3078 if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3079 RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
3083 dev_info = &rx_adapter->eth_devices[eth_dev_id];
3085 if (dev_info->rx_queue == NULL ||
3086 !dev_info->rx_queue[rx_queue_id].queue_enabled) {
3087 RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
3091 if (dev_info->internal_event_port == 0) {
3092 queue_info = &dev_info->rx_queue[rx_queue_id];
3093 rxa_queue_stats_reset(queue_info);
3096 dev = &rte_eventdevs[rx_adapter->eventdev_id];
3097 if (dev->dev_ops->eth_rx_adapter_queue_stats_reset != NULL) {
3098 return (*dev->dev_ops->eth_rx_adapter_queue_stats_reset)(dev,
3099 &rte_eth_devices[eth_dev_id],
3107 rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
3109 struct event_eth_rx_adapter *rx_adapter;
3111 if (rxa_memzone_lookup())
3114 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3116 rx_adapter = rxa_id_to_adapter(id);
3117 if (rx_adapter == NULL || service_id == NULL)
3120 if (rx_adapter->service_inited)
3121 *service_id = rx_adapter->service_id;
3123 return rx_adapter->service_inited ? 0 : -ESRCH;
3127 rte_event_eth_rx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
3129 struct event_eth_rx_adapter *rx_adapter;
3131 if (rxa_memzone_lookup())
3134 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3136 rx_adapter = rxa_id_to_adapter(id);
3137 if (rx_adapter == NULL || event_port_id == NULL)
3140 if (rx_adapter->service_inited)
3141 *event_port_id = rx_adapter->event_port_id;
3143 return rx_adapter->service_inited ? 0 : -ESRCH;
3147 rte_event_eth_rx_adapter_cb_register(uint8_t id,
3148 uint16_t eth_dev_id,
3149 rte_event_eth_rx_adapter_cb_fn cb_fn,
3152 struct event_eth_rx_adapter *rx_adapter;
3153 struct eth_device_info *dev_info;
3157 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3158 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
3160 rx_adapter = rxa_id_to_adapter(id);
3161 if (rx_adapter == NULL)
3164 dev_info = &rx_adapter->eth_devices[eth_dev_id];
3165 if (dev_info->rx_queue == NULL)
3168 ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
3172 RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
3173 "eth port %" PRIu16, id, eth_dev_id);
3177 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
3178 RTE_EDEV_LOG_ERR("Rx callback not supported for eth port %"
3179 PRIu16, eth_dev_id);
3183 rte_spinlock_lock(&rx_adapter->rx_lock);
3184 dev_info->cb_fn = cb_fn;
3185 dev_info->cb_arg = cb_arg;
3186 rte_spinlock_unlock(&rx_adapter->rx_lock);
3192 rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
3193 uint16_t eth_dev_id,
3194 uint16_t rx_queue_id,
3195 struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
3197 struct rte_eventdev *dev;
3198 struct event_eth_rx_adapter *rx_adapter;
3199 struct eth_device_info *dev_info;
3200 struct eth_rx_queue_info *queue_info;
3201 struct rte_event *qi_ev;
3204 if (rxa_memzone_lookup())
3207 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3208 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
3210 if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3211 RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
3215 if (queue_conf == NULL) {
3216 RTE_EDEV_LOG_ERR("Rx queue conf struct cannot be NULL");
3220 rx_adapter = rxa_id_to_adapter(id);
3221 if (rx_adapter == NULL)
3224 dev_info = &rx_adapter->eth_devices[eth_dev_id];
3225 if (dev_info->rx_queue == NULL ||
3226 !dev_info->rx_queue[rx_queue_id].queue_enabled) {
3227 RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
3231 queue_info = &dev_info->rx_queue[rx_queue_id];
3232 qi_ev = (struct rte_event *)&queue_info->event;
3234 memset(queue_conf, 0, sizeof(*queue_conf));
3235 queue_conf->rx_queue_flags = 0;
3236 if (queue_info->flow_id_mask != 0)
3237 queue_conf->rx_queue_flags |=
3238 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
3239 queue_conf->servicing_weight = queue_info->wt;
3241 memcpy(&queue_conf->ev, qi_ev, sizeof(*qi_ev));
3243 dev = &rte_eventdevs[rx_adapter->eventdev_id];
3244 if (dev->dev_ops->eth_rx_adapter_queue_conf_get != NULL) {
3245 ret = (*dev->dev_ops->eth_rx_adapter_queue_conf_get)(dev,
3246 &rte_eth_devices[eth_dev_id],
3255 #define RXA_ADD_DICT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
3258 handle_rxa_stats(const char *cmd __rte_unused,
3260 struct rte_tel_data *d)
3262 uint8_t rx_adapter_id;
3263 struct rte_event_eth_rx_adapter_stats rx_adptr_stats;
3265 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3268 /* Get Rx adapter ID from parameter string */
3269 rx_adapter_id = atoi(params);
3270 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
3272 /* Get Rx adapter stats */
3273 if (rte_event_eth_rx_adapter_stats_get(rx_adapter_id,
3275 RTE_EDEV_LOG_ERR("Failed to get Rx adapter stats\n");
3279 rte_tel_data_start_dict(d);
3280 rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
3281 RXA_ADD_DICT(rx_adptr_stats, rx_packets);
3282 RXA_ADD_DICT(rx_adptr_stats, rx_poll_count);
3283 RXA_ADD_DICT(rx_adptr_stats, rx_dropped);
3284 RXA_ADD_DICT(rx_adptr_stats, rx_enq_retry);
3285 RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_count);
3286 RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_size);
3287 RXA_ADD_DICT(rx_adptr_stats, rx_enq_count);
3288 RXA_ADD_DICT(rx_adptr_stats, rx_enq_start_ts);
3289 RXA_ADD_DICT(rx_adptr_stats, rx_enq_block_cycles);
3290 RXA_ADD_DICT(rx_adptr_stats, rx_enq_end_ts);
3291 RXA_ADD_DICT(rx_adptr_stats, rx_intr_packets);
3297 handle_rxa_stats_reset(const char *cmd __rte_unused,
3299 struct rte_tel_data *d __rte_unused)
3301 uint8_t rx_adapter_id;
	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3306 /* Get Rx adapter ID from parameter string */
3307 rx_adapter_id = atoi(params);
3308 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
3310 /* Reset Rx adapter stats */
3311 if (rte_event_eth_rx_adapter_stats_reset(rx_adapter_id)) {
3312 RTE_EDEV_LOG_ERR("Failed to reset Rx adapter stats\n");
3320 handle_rxa_get_queue_conf(const char *cmd __rte_unused,
3322 struct rte_tel_data *d)
3324 uint8_t rx_adapter_id;
3325 uint16_t rx_queue_id;
3327 char *token, *l_params;
3328 struct rte_event_eth_rx_adapter_queue_conf queue_conf;
3330 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3333 /* Get Rx adapter ID from parameter string */
	l_params = strdup(params);
	if (l_params == NULL)
		return -ENOMEM;
	token = strtok(l_params, ",");
3336 rx_adapter_id = strtoul(token, NULL, 10);
3337 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
3339 token = strtok(NULL, ",");
3340 if (token == NULL || strlen(token) == 0 || !isdigit(*token))
3343 /* Get device ID from parameter string */
3344 eth_dev_id = strtoul(token, NULL, 10);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
3347 token = strtok(NULL, ",");
3348 if (token == NULL || strlen(token) == 0 || !isdigit(*token))
3351 /* Get Rx queue ID from parameter string */
3352 rx_queue_id = strtoul(token, NULL, 10);
3353 if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3354 RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
	token = strtok(NULL, "\0");
	if (token != NULL)
		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
				 " telemetry command, ignoring");

	free(l_params);
3363 if (rte_event_eth_rx_adapter_queue_conf_get(rx_adapter_id, eth_dev_id,
3364 rx_queue_id, &queue_conf)) {
3365 RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue config");
3369 rte_tel_data_start_dict(d);
3370 rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
3371 rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id);
3372 rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id);
3373 RXA_ADD_DICT(queue_conf, rx_queue_flags);
3374 RXA_ADD_DICT(queue_conf, servicing_weight);
3375 RXA_ADD_DICT(queue_conf.ev, queue_id);
3376 RXA_ADD_DICT(queue_conf.ev, sched_type);
3377 RXA_ADD_DICT(queue_conf.ev, priority);
3378 RXA_ADD_DICT(queue_conf.ev, flow_id);
3384 handle_rxa_get_queue_stats(const char *cmd __rte_unused,
3386 struct rte_tel_data *d)
3388 uint8_t rx_adapter_id;
3389 uint16_t rx_queue_id;
3391 char *token, *l_params;
3392 struct rte_event_eth_rx_adapter_queue_stats q_stats;
3394 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3397 /* Get Rx adapter ID from parameter string */
	l_params = strdup(params);
	if (l_params == NULL)
		return -ENOMEM;
	token = strtok(l_params, ",");
3400 rx_adapter_id = strtoul(token, NULL, 10);
3401 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
3403 token = strtok(NULL, ",");
3404 if (token == NULL || strlen(token) == 0 || !isdigit(*token))
3407 /* Get device ID from parameter string */
3408 eth_dev_id = strtoul(token, NULL, 10);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
3411 token = strtok(NULL, ",");
3412 if (token == NULL || strlen(token) == 0 || !isdigit(*token))
3415 /* Get Rx queue ID from parameter string */
3416 rx_queue_id = strtoul(token, NULL, 10);
3417 if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3418 RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
	token = strtok(NULL, "\0");
	if (token != NULL)
		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
				 " telemetry command, ignoring");

	free(l_params);
3427 if (rte_event_eth_rx_adapter_queue_stats_get(rx_adapter_id, eth_dev_id,
3428 rx_queue_id, &q_stats)) {
3429 RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue stats");
3433 rte_tel_data_start_dict(d);
3434 rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
3435 rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id);
3436 rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id);
3437 RXA_ADD_DICT(q_stats, rx_event_buf_count);
3438 RXA_ADD_DICT(q_stats, rx_event_buf_size);
3439 RXA_ADD_DICT(q_stats, rx_poll_count);
3440 RXA_ADD_DICT(q_stats, rx_packets);
3441 RXA_ADD_DICT(q_stats, rx_dropped);
3447 handle_rxa_queue_stats_reset(const char *cmd __rte_unused,
3449 struct rte_tel_data *d __rte_unused)
3451 uint8_t rx_adapter_id;
3452 uint16_t rx_queue_id;
3454 char *token, *l_params;
3456 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3459 /* Get Rx adapter ID from parameter string */
	l_params = strdup(params);
	if (l_params == NULL)
		return -ENOMEM;
	token = strtok(l_params, ",");
3462 rx_adapter_id = strtoul(token, NULL, 10);
3463 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
3465 token = strtok(NULL, ",");
3466 if (token == NULL || strlen(token) == 0 || !isdigit(*token))
3469 /* Get device ID from parameter string */
3470 eth_dev_id = strtoul(token, NULL, 10);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
3473 token = strtok(NULL, ",");
3474 if (token == NULL || strlen(token) == 0 || !isdigit(*token))
3477 /* Get Rx queue ID from parameter string */
3478 rx_queue_id = strtoul(token, NULL, 10);
3479 if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3480 RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
	token = strtok(NULL, "\0");
	if (token != NULL)
		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
				 " telemetry command, ignoring");

	free(l_params);
3489 if (rte_event_eth_rx_adapter_queue_stats_reset(rx_adapter_id,
3492 RTE_EDEV_LOG_ERR("Failed to reset Rx adapter queue stats");
3499 RTE_INIT(rxa_init_telemetry)
3501 rte_telemetry_register_cmd("/eventdev/rxa_stats",
3503 "Returns Rx adapter stats. Parameter: rxa_id");
3505 rte_telemetry_register_cmd("/eventdev/rxa_stats_reset",
3506 handle_rxa_stats_reset,
3507 "Reset Rx adapter stats. Parameter: rxa_id");
3509 rte_telemetry_register_cmd("/eventdev/rxa_queue_conf",
3510 handle_rxa_get_queue_conf,
3511 "Returns Rx queue config. Parameter: rxa_id, dev_id, queue_id");
3513 rte_telemetry_register_cmd("/eventdev/rxa_queue_stats",
3514 handle_rxa_get_queue_stats,
3515 "Returns Rx queue stats. Parameter: rxa_id, dev_id, queue_id");
3517 rte_telemetry_register_cmd("/eventdev/rxa_queue_stats_reset",
3518 handle_rxa_queue_stats_reset,
3519 "Reset Rx queue stats. Parameter: rxa_id, dev_id, queue_id");