1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation.
10 #include <rte_cycles.h>
11 #include <rte_common.h>
13 #include <rte_errno.h>
14 #include <ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_service_component.h>
18 #include <rte_thash.h>
19 #include <rte_interrupts.h>
20 #include <rte_mbuf_dyn.h>
21 #include <rte_telemetry.h>
23 #include "rte_eventdev.h"
24 #include "eventdev_pmd.h"
25 #include "eventdev_trace.h"
26 #include "rte_event_eth_rx_adapter.h"
29 #define BLOCK_CNT_THRESHOLD 10
30 #define ETH_EVENT_BUFFER_SIZE (6*BATCH_SIZE)
31 #define MAX_VECTOR_SIZE 1024
32 #define MIN_VECTOR_SIZE 4
33 #define MAX_VECTOR_NS 1E9
34 #define MIN_VECTOR_NS 1E5
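/* The above bound the per queue event vector size (in mbufs) and the vector
 * timeout (in ns) used when event vectorization is enabled.
 */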
36 #define ETH_RX_ADAPTER_SERVICE_NAME_LEN 32
37 #define ETH_RX_ADAPTER_MEM_NAME_LEN 32
39 #define RSS_KEY_SIZE 40
40 /* value written to intr thread pipe to signal thread exit */
41 #define ETH_BRIDGE_INTR_THREAD_EXIT 1
42 /* Sentinel value to detect an uninitialized file handle */
45 #define RXA_ADAPTER_ARRAY "rte_event_eth_rx_adapter_array"
48 * Used to store port and queue ID of interrupting Rx queue
60 * There is an instance of this struct per polled Rx queue added to the
63 struct eth_rx_poll_entry {
64 /* Eth port to poll */
66 /* Eth rx queue to poll */
70 struct eth_rx_vector_data {
71 TAILQ_ENTRY(eth_rx_vector_data) next;
74 uint16_t max_vector_count;
77 uint64_t vector_timeout_ticks;
78 struct rte_mempool *vector_pool;
79 struct rte_event_vector *vector_ev;
80 } __rte_cache_aligned;
82 TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
84 /* Instance per adapter, or per Rx queue when use_queue_event_buf is set */
85 struct eth_event_enqueue_buffer {
86 /* Count of events in this buffer */
88 /* Array of events in this buffer */
89 struct rte_event *events;
90 /* size of event buffer */
92 /* Event enqueue happens from head */
94 /* New packets from rte_eth_rx_burst are enqueued at the tail */
96 /* last element in the buffer before rollover */
101 struct event_eth_rx_adapter {
103 uint8_t rss_key_be[RSS_KEY_SIZE];
104 /* Event device identifier */
106 /* Event port identifier */
107 uint8_t event_port_id;
108 /* Flag indicating per rxq event buffer */
109 bool use_queue_event_buf;
110 /* Per ethernet device structure */
111 struct eth_device_info *eth_devices;
112 /* Lock to serialize config updates with service function */
113 rte_spinlock_t rx_lock;
114 /* Max mbufs processed in any service function invocation */
116 /* Receive queues that need to be polled */
117 struct eth_rx_poll_entry *eth_rx_poll;
118 /* Size of the eth_rx_poll array */
119 uint16_t num_rx_polled;
120 /* Weighted round robin schedule */
122 /* wrr_sched[] size */
124 /* Next entry in wrr[] to begin polling */
126 /* Event burst buffer */
127 struct eth_event_enqueue_buffer event_enqueue_buffer;
128 /* Vector enable flag */
130 /* Timestamp of previous vector expiry list traversal */
131 uint64_t prev_expiry_ts;
132 /* Minimum ticks to wait before traversing expiry list */
133 uint64_t vector_tmo_ticks;
135 struct eth_rx_vector_data_list vector_list;
136 /* Per adapter stats */
137 struct rte_event_eth_rx_adapter_stats stats;
138 /* Block count, counts up to BLOCK_CNT_THRESHOLD */
139 uint16_t enq_block_count;
141 uint64_t rx_enq_block_start_ts;
142 /* epoll fd used to wait for Rx interrupts */
144 /* Number of interrupt driven queues */
145 uint32_t num_rx_intr;
146 /* Used to send <dev id, queue id> of interrupting Rx queues from
147 * the interrupt thread to the Rx thread
149 struct rte_ring *intr_ring;
150 /* Rx Queue data (dev id, queue id) for the last non-empty
154 /* queue_data is valid */
156 /* Interrupt ring lock, synchronizes Rx thread
157 * and interrupt thread
159 rte_spinlock_t intr_ring_lock;
160 /* Event array passed to rte_epoll_wait */
161 struct rte_epoll_event *epoll_events;
162 /* Count of interrupt vectors in use */
163 uint32_t num_intr_vec;
164 /* Thread blocked on Rx interrupts */
165 pthread_t rx_intr_thread;
166 /* Configuration callback for rte_service configuration */
167 rte_event_eth_rx_adapter_conf_cb conf_cb;
168 /* Configuration callback argument */
170 /* Set if default_cb is being used */
172 /* Service initialization state */
173 uint8_t service_inited;
174 /* Total count of Rx queues in adapter */
176 /* Memory allocation name */
177 char mem_name[ETH_RX_ADAPTER_MEM_NAME_LEN];
178 /* Socket identifier cached from eventdev */
180 /* Per adapter EAL service */
182 /* Adapter started flag */
186 } __rte_cache_aligned;
189 struct eth_device_info {
190 struct rte_eth_dev *dev;
191 struct eth_rx_queue_info *rx_queue;
193 rte_event_eth_rx_adapter_cb_fn cb_fn;
194 /* Rx callback argument */
196 /* Set if ethdev->eventdev packet transfer uses a
199 uint8_t internal_event_port;
200 /* Set if the adapter is processing rx queues for
201 * this eth device and packet processing has been
202 * started, allows for the code to know if the PMD
203 * rx_adapter_stop callback needs to be invoked
205 uint8_t dev_rx_started;
206 /* Number of queues added for this device */
207 uint16_t nb_dev_queues;
208 /* Number of poll based queues
209 * If nb_rx_poll > 0, the start callback will
210 * be invoked if not already invoked
213 /* Number of interrupt based queues
214 * If nb_rx_intr > 0, the start callback will
215 * be invoked if not already invoked.
218 /* Number of queues that use the shared interrupt */
219 uint16_t nb_shared_intr;
220 /* sum(wrr(q)) for all queues within the device
221 * useful when deleting all device queues
224 /* Intr based queue index to start polling from, this is used
225 * if the number of shared interrupts is non-zero
228 /* Intr based queue indices */
229 uint16_t *intr_queue;
230 /* Device generates a per Rx queue interrupt for queue indices
231 * < RTE_MAX_RXTX_INTR_VEC_ID - 1
234 /* shared interrupt enabled */
235 int shared_intr_enabled;
239 struct eth_rx_queue_info {
240 int queue_enabled; /* True if added */
243 uint16_t wt; /* Polling weight */
244 uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */
246 struct eth_rx_vector_data vector_data;
247 struct eth_event_enqueue_buffer *event_buf;
248 /* Use the adapter stats struct for queue level stats,
249 * as the same stats need to be updated for both adapter and queue
251 struct rte_event_eth_rx_adapter_stats *stats;
254 static struct event_eth_rx_adapter **event_eth_rx_adapter;
256 /* Enable dynamic timestamp field in mbuf */
257 static uint64_t event_eth_rx_timestamp_dynflag;
258 static int event_eth_rx_timestamp_dynfield_offset = -1;
260 static inline rte_mbuf_timestamp_t *
261 rxa_timestamp_dynfield(struct rte_mbuf *mbuf)
263 return RTE_MBUF_DYNFIELD(mbuf,
264 event_eth_rx_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
268 rxa_validate_id(uint8_t id)
270 return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
273 static inline struct eth_event_enqueue_buffer *
274 rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
275 uint16_t rx_queue_id,
276 struct rte_event_eth_rx_adapter_stats **stats)
278 if (rx_adapter->use_queue_event_buf) {
279 struct eth_device_info *dev_info =
280 &rx_adapter->eth_devices[eth_dev_id];
281 *stats = dev_info->rx_queue[rx_queue_id].stats;
282 return dev_info->rx_queue[rx_queue_id].event_buf;
284 *stats = &rx_adapter->stats;
285 return &rx_adapter->event_enqueue_buffer;
289 #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
290 if (!rxa_validate_id(id)) { \
291 RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
296 #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(id, retval) do { \
297 if (!rxa_validate_id(id)) { \
298 RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
304 #define RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, retval) do { \
305 if ((token) == NULL || strlen(token) == 0 || !isdigit(*token)) { \
306 RTE_EDEV_LOG_ERR("Invalid eth Rx adapter token\n"); \
312 #define RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(port_id, retval) do { \
313 if (!rte_eth_dev_is_valid_port(port_id)) { \
314 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
321 rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
323 return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
326 /* Greatest common divisor */
327 static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
331 return r ? rxa_gcd_u16(b, r) : b;
334 /* Returns the next queue in the polling sequence
336 * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
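 *
 * Illustrative example (assumed weights, not from the original source): with
 * three queues of servicing weights {4, 2, 1}, max_wt = 4 and gcd = 1, the
 * algorithm yields a repeating sequence such as q0 q0 q0 q1 q0 q1 q2, i.e.
 * each queue is polled in proportion to its weight.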
339 rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw,
340 struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
341 uint16_t gcd, int prev)
357 q = eth_rx_poll[i].eth_rx_qid;
358 d = eth_rx_poll[i].eth_dev_id;
359 w = rx_adapter->eth_devices[d].rx_queue[q].wt;
367 rxa_shared_intr(struct eth_device_info *dev_info,
372 if (dev_info->dev->intr_handle == NULL)
375 multi_intr_cap = rte_intr_cap_multiple(dev_info->dev->intr_handle);
376 return !multi_intr_cap ||
377 rx_queue_id >= RTE_MAX_RXTX_INTR_VEC_ID - 1;
381 rxa_intr_queue(struct eth_device_info *dev_info,
384 struct eth_rx_queue_info *queue_info;
386 queue_info = &dev_info->rx_queue[rx_queue_id];
387 return dev_info->rx_queue &&
388 !dev_info->internal_event_port &&
389 queue_info->queue_enabled && queue_info->wt == 0;
393 rxa_polled_queue(struct eth_device_info *dev_info,
396 struct eth_rx_queue_info *queue_info;
398 queue_info = &dev_info->rx_queue[rx_queue_id];
399 return !dev_info->internal_event_port &&
400 dev_info->rx_queue &&
401 queue_info->queue_enabled && queue_info->wt != 0;
404 /* Calculate change in number of vectors after Rx queue ID is added/deleted */
406 rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
412 nbq = dev_info->dev->data->nb_rx_queues;
413 n = 0; /* non shared count */
414 s = 0; /* shared count */
416 if (rx_queue_id == -1) {
417 for (i = 0; i < nbq; i++) {
418 if (!rxa_shared_intr(dev_info, i))
419 n += add ? !rxa_intr_queue(dev_info, i) :
420 rxa_intr_queue(dev_info, i);
422 s += add ? !rxa_intr_queue(dev_info, i) :
423 rxa_intr_queue(dev_info, i);
427 if ((add && dev_info->nb_shared_intr == 0) ||
428 (!add && dev_info->nb_shared_intr))
432 if (!rxa_shared_intr(dev_info, rx_queue_id))
433 n = add ? !rxa_intr_queue(dev_info, rx_queue_id) :
434 rxa_intr_queue(dev_info, rx_queue_id);
436 n = add ? !dev_info->nb_shared_intr :
437 dev_info->nb_shared_intr == 1;
443 /* Calculate nb_rx_intr after deleting interrupt mode rx queues
446 rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter,
447 struct eth_device_info *dev_info, int rx_queue_id,
448 uint32_t *nb_rx_intr)
452 if (rx_queue_id == -1)
453 intr_diff = dev_info->nb_rx_intr;
455 intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
457 *nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
460 /* Calculate nb_rx_* after adding interrupt mode rx queues; the newly added
461 * interrupt queues may currently be poll mode Rx queues
464 rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter,
465 struct eth_device_info *dev_info, int rx_queue_id,
466 uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
471 uint32_t wrr_len_diff;
473 if (rx_queue_id == -1) {
474 intr_diff = dev_info->dev->data->nb_rx_queues -
475 dev_info->nb_rx_intr;
476 poll_diff = dev_info->nb_rx_poll;
477 wrr_len_diff = dev_info->wrr_len;
479 intr_diff = !rxa_intr_queue(dev_info, rx_queue_id);
480 poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
481 wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
485 *nb_rx_intr = rx_adapter->num_rx_intr + intr_diff;
486 *nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
487 *nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
490 /* Calculate size of the eth_rx_poll and wrr_sched arrays
491 * after deleting poll mode rx queues
494 rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter,
495 struct eth_device_info *dev_info, int rx_queue_id,
496 uint32_t *nb_rx_poll, uint32_t *nb_wrr)
499 uint32_t wrr_len_diff;
501 if (rx_queue_id == -1) {
502 poll_diff = dev_info->nb_rx_poll;
503 wrr_len_diff = dev_info->wrr_len;
505 poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
506 wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
510 *nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
511 *nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
514 /* Calculate nb_rx_* after adding poll mode rx queues
517 rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter,
518 struct eth_device_info *dev_info, int rx_queue_id,
519 uint16_t wt, uint32_t *nb_rx_poll,
520 uint32_t *nb_rx_intr, uint32_t *nb_wrr)
524 uint32_t wrr_len_diff;
526 if (rx_queue_id == -1) {
527 intr_diff = dev_info->nb_rx_intr;
528 poll_diff = dev_info->dev->data->nb_rx_queues -
529 dev_info->nb_rx_poll;
530 wrr_len_diff = wt*dev_info->dev->data->nb_rx_queues
533 intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
534 poll_diff = !rxa_polled_queue(dev_info, rx_queue_id);
535 wrr_len_diff = rxa_polled_queue(dev_info, rx_queue_id) ?
536 wt - dev_info->rx_queue[rx_queue_id].wt :
540 *nb_rx_poll = rx_adapter->num_rx_polled + poll_diff;
541 *nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
542 *nb_wrr = rx_adapter->wrr_len + wrr_len_diff;
545 /* Calculate nb_rx_* after adding rx_queue_id */
547 rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter,
548 struct eth_device_info *dev_info, int rx_queue_id,
549 uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
553 rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
554 wt, nb_rx_poll, nb_rx_intr, nb_wrr);
556 rxa_calc_nb_post_add_intr(rx_adapter, dev_info, rx_queue_id,
557 nb_rx_poll, nb_rx_intr, nb_wrr);
560 /* Calculate nb_rx_* after deleting rx_queue_id */
562 rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter,
563 struct eth_device_info *dev_info, int rx_queue_id,
564 uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
567 rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
569 rxa_calc_nb_post_intr_del(rx_adapter, dev_info, rx_queue_id,
574 * Allocate the rx_poll array
576 static struct eth_rx_poll_entry *
577 rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled)
581 len = RTE_ALIGN(num_rx_polled * sizeof(*rx_adapter->eth_rx_poll),
582 RTE_CACHE_LINE_SIZE);
583 return rte_zmalloc_socket(rx_adapter->mem_name,
586 rx_adapter->socket_id);
590 * Allocate the WRR array
593 rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr)
597 len = RTE_ALIGN(nb_wrr * sizeof(*rx_adapter->wrr_sched),
598 RTE_CACHE_LINE_SIZE);
599 return rte_zmalloc_socket(rx_adapter->mem_name,
602 rx_adapter->socket_id);
606 rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll,
607 uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll,
608 uint32_t **wrr_sched)
617 *rx_poll = rxa_alloc_poll(rx_adapter, nb_poll);
618 if (*rx_poll == NULL) {
623 *wrr_sched = rxa_alloc_wrr(rx_adapter, nb_wrr);
624 if (*wrr_sched == NULL) {
631 /* Precalculate WRR polling sequence for all queues in rx_adapter */
633 rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter,
634 struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr)
642 /* Initialize variables for calculation of wrr schedule */
643 uint16_t max_wrr_pos = 0;
644 unsigned int poll_q = 0;
651 /* Generate array of all queues to poll, the size of this
654 RTE_ETH_FOREACH_DEV(d) {
655 uint16_t nb_rx_queues;
656 struct eth_device_info *dev_info =
657 &rx_adapter->eth_devices[d];
658 nb_rx_queues = dev_info->dev->data->nb_rx_queues;
659 if (dev_info->rx_queue == NULL)
661 if (dev_info->internal_event_port)
663 dev_info->wrr_len = 0;
664 for (q = 0; q < nb_rx_queues; q++) {
665 struct eth_rx_queue_info *queue_info =
666 &dev_info->rx_queue[q];
669 if (!rxa_polled_queue(dev_info, q))
672 rx_poll[poll_q].eth_dev_id = d;
673 rx_poll[poll_q].eth_rx_qid = q;
675 dev_info->wrr_len += wt;
676 max_wt = RTE_MAX(max_wt, wt);
677 gcd = (gcd) ? rxa_gcd_u16(gcd, wt) : wt;
682 /* Generate polling sequence based on weights */
685 for (i = 0; i < max_wrr_pos; i++) {
686 rx_wrr[i] = rxa_wrr_next(rx_adapter, poll_q, &cw,
687 rx_poll, max_wt, gcd, prev);
693 rxa_mtoip(struct rte_mbuf *m, struct rte_ipv4_hdr **ipv4_hdr,
694 struct rte_ipv6_hdr **ipv6_hdr)
696 struct rte_ether_hdr *eth_hdr =
697 rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
698 struct rte_vlan_hdr *vlan_hdr;
703 switch (eth_hdr->ether_type) {
704 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
705 *ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
708 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
709 *ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
712 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
713 vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
714 switch (vlan_hdr->eth_proto) {
715 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
716 *ipv4_hdr = (struct rte_ipv4_hdr *)(vlan_hdr + 1);
718 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
719 *ipv6_hdr = (struct rte_ipv6_hdr *)(vlan_hdr + 1);
731 /* Calculate RSS hash for IPv4/6 */
732 static inline uint32_t
733 rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
737 struct rte_ipv4_tuple ipv4_tuple;
738 struct rte_ipv6_tuple ipv6_tuple;
739 struct rte_ipv4_hdr *ipv4_hdr;
740 struct rte_ipv6_hdr *ipv6_hdr;
742 rxa_mtoip(m, &ipv4_hdr, &ipv6_hdr);
745 ipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);
746 ipv4_tuple.dst_addr = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
748 input_len = RTE_THASH_V4_L3_LEN;
749 } else if (ipv6_hdr) {
750 rte_thash_load_v6_addrs(ipv6_hdr,
751 (union rte_thash_tuple *)&ipv6_tuple);
753 input_len = RTE_THASH_V6_L3_LEN;
757 return rte_softrss_be(tuple, input_len, rss_key_be);
761 rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter)
763 return !!rx_adapter->enq_block_count;
767 rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter)
769 if (rx_adapter->rx_enq_block_start_ts)
772 rx_adapter->enq_block_count++;
773 if (rx_adapter->enq_block_count < BLOCK_CNT_THRESHOLD)
776 rx_adapter->rx_enq_block_start_ts = rte_get_tsc_cycles();
780 rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
781 struct rte_event_eth_rx_adapter_stats *stats)
783 if (unlikely(!stats->rx_enq_start_ts))
784 stats->rx_enq_start_ts = rte_get_tsc_cycles();
786 if (likely(!rxa_enq_blocked(rx_adapter)))
789 rx_adapter->enq_block_count = 0;
790 if (rx_adapter->rx_enq_block_start_ts) {
791 stats->rx_enq_end_ts = rte_get_tsc_cycles();
792 stats->rx_enq_block_cycles += stats->rx_enq_end_ts -
793 rx_adapter->rx_enq_block_start_ts;
794 rx_adapter->rx_enq_block_start_ts = 0;
798 /* Enqueue buffered events to event device */
799 static inline uint16_t
800 rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter,
801 struct eth_event_enqueue_buffer *buf,
802 struct rte_event_eth_rx_adapter_stats *stats)
804 uint16_t count = buf->count;
811 count = buf->last - buf->head;
814 n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
815 rx_adapter->event_port_id,
816 &buf->events[buf->head],
819 stats->rx_enq_retry++;
824 if (buf->last && n == count) {
827 n1 = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
828 rx_adapter->event_port_id,
833 stats->rx_enq_retry++;
841 n ? rxa_enq_block_end_ts(rx_adapter, stats) :
842 rxa_enq_block_start_ts(rx_adapter);
845 stats->rx_enq_count += n;
851 rxa_init_vector(struct event_eth_rx_adapter *rx_adapter,
852 struct eth_rx_vector_data *vec)
854 vec->vector_ev->nb_elem = 0;
855 vec->vector_ev->port = vec->port;
856 vec->vector_ev->queue = vec->queue;
857 vec->vector_ev->attr_valid = true;
858 TAILQ_INSERT_TAIL(&rx_adapter->vector_list, vec, next);
861 static inline uint16_t
862 rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
863 struct eth_rx_queue_info *queue_info,
864 struct eth_event_enqueue_buffer *buf,
865 struct rte_mbuf **mbufs, uint16_t num)
867 struct rte_event *ev = &buf->events[buf->count];
868 struct eth_rx_vector_data *vec;
869 uint16_t filled, space, sz;
872 vec = &queue_info->vector_data;
874 if (vec->vector_ev == NULL) {
875 if (rte_mempool_get(vec->vector_pool,
876 (void **)&vec->vector_ev) < 0) {
877 rte_pktmbuf_free_bulk(mbufs, num);
880 rxa_init_vector(rx_adapter, vec);
883 if (vec->vector_ev->nb_elem == vec->max_vector_count) {
885 ev->event = vec->event;
886 ev->vec = vec->vector_ev;
889 vec->vector_ev = NULL;
890 TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
891 if (rte_mempool_get(vec->vector_pool,
892 (void **)&vec->vector_ev) < 0) {
893 rte_pktmbuf_free_bulk(mbufs, num);
896 rxa_init_vector(rx_adapter, vec);
899 space = vec->max_vector_count - vec->vector_ev->nb_elem;
900 sz = num > space ? space : num;
901 memcpy(vec->vector_ev->mbufs + vec->vector_ev->nb_elem, mbufs,
902 sizeof(void *) * sz);
903 vec->vector_ev->nb_elem += sz;
906 vec->ts = rte_rdtsc();
909 if (vec->vector_ev->nb_elem == vec->max_vector_count) {
910 ev->event = vec->event;
911 ev->vec = vec->vector_ev;
914 vec->vector_ev = NULL;
915 TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
922 rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
923 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num,
924 struct eth_event_enqueue_buffer *buf,
925 struct rte_event_eth_rx_adapter_stats *stats)
928 struct eth_device_info *dev_info =
929 &rx_adapter->eth_devices[eth_dev_id];
930 struct eth_rx_queue_info *eth_rx_queue_info =
931 &dev_info->rx_queue[rx_queue_id];
932 uint16_t new_tail = buf->tail;
933 uint64_t event = eth_rx_queue_info->event;
934 uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
935 struct rte_mbuf *m = mbufs[0];
941 uint64_t ts, ts_mask;
943 if (!eth_rx_queue_info->ena_vector) {
944 ts = m->ol_flags & event_eth_rx_timestamp_dynflag ?
945 0 : rte_get_tsc_cycles();
947 /* 0xffff ffff ffff ffff if RTE_MBUF_F_RX_TIMESTAMP is set,
950 ts_mask = (uint64_t)(!(m->ol_flags &
951 event_eth_rx_timestamp_dynflag)) - 1ULL;
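/* (!(ol_flags & flag)) evaluates to 0 when the flag is set, and
 * 0 - 1ULL wraps around to all ones.
 */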
953 /* 0xffff ffff if RTE_MBUF_F_RX_RSS_HASH is set, otherwise 0 */
954 rss_mask = ~(((m->ol_flags & RTE_MBUF_F_RX_RSS_HASH) != 0) - 1);
955 do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
956 for (i = 0; i < num; i++) {
957 struct rte_event *ev;
960 *rxa_timestamp_dynfield(m) = ts |
961 (*rxa_timestamp_dynfield(m) & ts_mask);
963 ev = &buf->events[new_tail];
965 rss = do_rss ? rxa_do_softrss(m, rx_adapter->rss_key_be)
968 ev->flow_id = (rss & ~flow_id_mask) |
969 (ev->flow_id & flow_id_mask);
974 num = rxa_create_event_vector(rx_adapter, eth_rx_queue_info,
978 if (num && dev_info->cb_fn) {
981 nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id,
983 (buf->events_size & ~buf->last_mask),
984 buf->count >= BATCH_SIZE ?
985 buf->count - BATCH_SIZE : 0,
986 &buf->events[buf->tail],
990 if (unlikely(nb_cb > num))
991 RTE_EDEV_LOG_ERR("Rx CB returned %d (> %d) events",
996 stats->rx_dropped += dropped;
1004 rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf)
1006 uint32_t nb_req = buf->tail + BATCH_SIZE;
1009 if (nb_req <= buf->events_size)
1012 if (buf->head >= BATCH_SIZE) {
1013 buf->last_mask = ~0;
1014 buf->last = buf->tail;
1020 return nb_req <= buf->head;
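/* Note: the enqueue buffer behaves as a ring. When a full BATCH_SIZE no longer
 * fits at the tail, writing wraps to the start of the array provided at least
 * BATCH_SIZE events have already been drained from the head; buf->last records
 * where valid events end before the wrap and is used by rxa_flush_event_buffer().
 */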
1023 /* Enqueue packets from <port, q> to event buffer */
1024 static inline uint32_t
1025 rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
1026 uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
1027 int *rxq_empty, struct eth_event_enqueue_buffer *buf,
1028 struct rte_event_eth_rx_adapter_stats *stats)
1030 struct rte_mbuf *mbufs[BATCH_SIZE];
1033 uint32_t nb_flushed = 0;
1037 /* Don't do a batch dequeue from the rx queue if there isn't
1038 * enough space in the enqueue buffer.
1040 while (rxa_pkt_buf_available(buf)) {
1041 if (buf->count >= BATCH_SIZE)
1043 rxa_flush_event_buffer(rx_adapter, buf, stats);
1045 stats->rx_poll_count++;
1046 n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
1052 rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf,
1055 if (rx_count + nb_rx > max_rx)
1060 nb_flushed += rxa_flush_event_buffer(rx_adapter, buf, stats);
1062 stats->rx_packets += nb_rx;
1063 if (nb_flushed == 0)
1064 rte_event_maintain(rx_adapter->eventdev_id,
1065 rx_adapter->event_port_id, 0);
1071 rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data)
1076 union queue_data qd;
1077 struct eth_device_info *dev_info;
1078 struct eth_rx_queue_info *queue_info;
1085 dev_info = &rx_adapter->eth_devices[port_id];
1086 queue_info = &dev_info->rx_queue[queue];
1087 rte_spinlock_lock(&rx_adapter->intr_ring_lock);
1088 if (rxa_shared_intr(dev_info, queue))
1089 intr_enabled = &dev_info->shared_intr_enabled;
1091 intr_enabled = &queue_info->intr_enabled;
1093 if (*intr_enabled) {
1095 err = rte_ring_enqueue(rx_adapter->intr_ring, data);
1096 /* Entry should always be available.
1097 * The ring size equals the maximum number of interrupt
1098 * vectors supported (an interrupt vector is shared in
1099 * case of shared interrupts)
1102 RTE_EDEV_LOG_ERR("Failed to enqueue interrupt"
1103 " to ring: %s", strerror(-err));
1105 rte_eth_dev_rx_intr_disable(port_id, queue);
1107 rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
1111 rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter,
1112 uint32_t num_intr_vec)
1114 if (rx_adapter->num_intr_vec + num_intr_vec >
1115 RTE_EVENT_ETH_INTR_RING_SIZE) {
1116 RTE_EDEV_LOG_ERR("Exceeded intr ring slots current"
1117 " %d needed %d limit %d", rx_adapter->num_intr_vec,
1118 num_intr_vec, RTE_EVENT_ETH_INTR_RING_SIZE);
1125 /* Delete entries for (dev, queue) from the interrupt ring */
1127 rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter,
1128 struct eth_device_info *dev_info,
1129 uint16_t rx_queue_id)
1132 union queue_data qd;
1134 rte_spinlock_lock(&rx_adapter->intr_ring_lock);
1136 n = rte_ring_count(rx_adapter->intr_ring);
1137 for (i = 0; i < n; i++) {
1138 rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
1139 if (!rxa_shared_intr(dev_info, rx_queue_id)) {
1140 if (qd.port == dev_info->dev->data->port_id &&
1141 qd.queue == rx_queue_id)
1144 if (qd.port == dev_info->dev->data->port_id)
1147 rte_ring_enqueue(rx_adapter->intr_ring, qd.ptr);
1150 rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
1153 /* pthread callback handling interrupt mode receive queues
1154 * After receiving an Rx interrupt, it enqueues the port id and queue id of the
1155 * interrupting queue to the adapter's ring buffer for interrupt events.
1156 * These events are picked up by rxa_intr_ring_dequeue() which is invoked from
1157 * the adapter service function.
1160 rxa_intr_thread(void *arg)
1162 struct event_eth_rx_adapter *rx_adapter = arg;
1163 struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
1167 n = rte_epoll_wait(rx_adapter->epd, epoll_events,
1168 RTE_EVENT_ETH_INTR_RING_SIZE, -1);
1169 if (unlikely(n < 0))
1170 RTE_EDEV_LOG_ERR("rte_epoll_wait returned error %d",
1172 for (i = 0; i < n; i++) {
1173 rxa_intr_ring_enqueue(rx_adapter,
1174 epoll_events[i].epdata.data);
1181 /* Dequeue <port, q> from interrupt ring and enqueue received
1185 rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
1190 struct eth_event_enqueue_buffer *buf;
1191 struct rte_event_eth_rx_adapter_stats *stats;
1192 rte_spinlock_t *ring_lock;
1193 uint8_t max_done = 0;
1195 if (rx_adapter->num_rx_intr == 0)
1198 if (rte_ring_count(rx_adapter->intr_ring) == 0
1199 && !rx_adapter->qd_valid)
1202 buf = &rx_adapter->event_enqueue_buffer;
1203 stats = &rx_adapter->stats;
1204 ring_lock = &rx_adapter->intr_ring_lock;
1206 if (buf->count >= BATCH_SIZE)
1207 rxa_flush_event_buffer(rx_adapter, buf, stats);
1209 while (rxa_pkt_buf_available(buf)) {
1210 struct eth_device_info *dev_info;
1213 union queue_data qd = rx_adapter->qd;
1216 if (!rx_adapter->qd_valid) {
1217 struct eth_rx_queue_info *queue_info;
1219 rte_spinlock_lock(ring_lock);
1220 err = rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
1222 rte_spinlock_unlock(ring_lock);
1228 rx_adapter->qd = qd;
1229 rx_adapter->qd_valid = 1;
1230 dev_info = &rx_adapter->eth_devices[port];
1231 if (rxa_shared_intr(dev_info, queue))
1232 dev_info->shared_intr_enabled = 1;
1234 queue_info = &dev_info->rx_queue[queue];
1235 queue_info->intr_enabled = 1;
1237 rte_eth_dev_rx_intr_enable(port, queue);
1238 rte_spinlock_unlock(ring_lock);
1243 dev_info = &rx_adapter->eth_devices[port];
1246 if (rxa_shared_intr(dev_info, queue)) {
1250 nb_queues = dev_info->dev->data->nb_rx_queues;
1252 for (i = dev_info->next_q_idx; i < nb_queues; i++) {
1253 uint8_t enq_buffer_full;
1255 if (!rxa_intr_queue(dev_info, i))
1257 n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
1258 rx_adapter->max_nb_rx,
1259 &rxq_empty, buf, stats);
1262 enq_buffer_full = !rxq_empty && n == 0;
1263 max_done = nb_rx > rx_adapter->max_nb_rx;
1265 if (enq_buffer_full || max_done) {
1266 dev_info->next_q_idx = i;
1271 rx_adapter->qd_valid = 0;
1273 /* Reinitialize for next interrupt */
1274 dev_info->next_q_idx = dev_info->multi_intr_cap ?
1275 RTE_MAX_RXTX_INTR_VEC_ID - 1 :
1278 n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
1279 rx_adapter->max_nb_rx,
1280 &rxq_empty, buf, stats);
1281 rx_adapter->qd_valid = !rxq_empty;
1283 if (nb_rx > rx_adapter->max_nb_rx)
1289 rx_adapter->stats.rx_intr_packets += nb_rx;
1293 * Polls receive queues added to the event adapter and enqueues received
1294 * packets to the event device.
1296 * The receive code enqueues initially to a temporary buffer; the
1297 * temporary buffer is drained whenever it holds >= BATCH_SIZE packets.
1299 * If there isn't space available in the temporary buffer, packets from the
1300 * Rx queue aren't dequeued from the eth device. This back pressures the
1301 * eth device; in virtual device environments the back pressure is relayed to
1302 * the hypervisor's switching layer where adjustments can be made to deal with
1306 rxa_poll(struct event_eth_rx_adapter *rx_adapter)
1310 struct eth_event_enqueue_buffer *buf = NULL;
1311 struct rte_event_eth_rx_adapter_stats *stats = NULL;
1315 wrr_pos = rx_adapter->wrr_pos;
1316 max_nb_rx = rx_adapter->max_nb_rx;
1318 /* Iterate through a WRR sequence */
1319 for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
1320 unsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];
1321 uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
1322 uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
1324 buf = rxa_event_buf_get(rx_adapter, d, qid, &stats);
1326 /* Don't do a batch dequeue from the rx queue if there isn't
1327 * enough space in the enqueue buffer.
1329 if (buf->count >= BATCH_SIZE)
1330 rxa_flush_event_buffer(rx_adapter, buf, stats);
1331 if (!rxa_pkt_buf_available(buf)) {
1332 if (rx_adapter->use_queue_event_buf)
1333 goto poll_next_entry;
1335 rx_adapter->wrr_pos = wrr_pos;
1340 nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
1342 if (nb_rx > max_nb_rx) {
1343 rx_adapter->wrr_pos =
1344 (wrr_pos + 1) % rx_adapter->wrr_len;
1349 if (++wrr_pos == rx_adapter->wrr_len)
1355 rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
1357 struct event_eth_rx_adapter *rx_adapter = arg;
1358 struct eth_event_enqueue_buffer *buf = NULL;
1359 struct rte_event_eth_rx_adapter_stats *stats = NULL;
1360 struct rte_event *ev;
1362 buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue, &stats);
1365 rxa_flush_event_buffer(rx_adapter, buf, stats);
1367 if (vec->vector_ev->nb_elem == 0)
1369 ev = &buf->events[buf->count];
1372 ev->event = vec->event;
1373 ev->vec = vec->vector_ev;
1376 vec->vector_ev = NULL;
1381 rxa_service_func(void *args)
1383 struct event_eth_rx_adapter *rx_adapter = args;
1385 if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
1387 if (!rx_adapter->rxa_started) {
1388 rte_spinlock_unlock(&rx_adapter->rx_lock);
1392 if (rx_adapter->ena_vector) {
1393 if ((rte_rdtsc() - rx_adapter->prev_expiry_ts) >=
1394 rx_adapter->vector_tmo_ticks) {
1395 struct eth_rx_vector_data *vec;
1397 TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {
1398 uint64_t elapsed_time = rte_rdtsc() - vec->ts;
1400 if (elapsed_time >= vec->vector_timeout_ticks) {
1401 rxa_vector_expire(vec, rx_adapter);
1402 TAILQ_REMOVE(&rx_adapter->vector_list,
1406 rx_adapter->prev_expiry_ts = rte_rdtsc();
1410 rxa_intr_ring_dequeue(rx_adapter);
1411 rxa_poll(rx_adapter);
1413 rte_spinlock_unlock(&rx_adapter->rx_lock);
1419 rte_event_eth_rx_adapter_init(void)
1421 const char *name = RXA_ADAPTER_ARRAY;
1422 const struct rte_memzone *mz;
1425 sz = sizeof(*event_eth_rx_adapter) *
1426 RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
1427 sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
1429 mz = rte_memzone_lookup(name);
1431 mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
1432 RTE_CACHE_LINE_SIZE);
1434 RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
1440 event_eth_rx_adapter = mz->addr;
1445 rxa_memzone_lookup(void)
1447 const struct rte_memzone *mz;
1449 if (event_eth_rx_adapter == NULL) {
1450 mz = rte_memzone_lookup(RXA_ADAPTER_ARRAY);
1453 event_eth_rx_adapter = mz->addr;
1459 static inline struct event_eth_rx_adapter *
1460 rxa_id_to_adapter(uint8_t id)
1462 return event_eth_rx_adapter ?
1463 event_eth_rx_adapter[id] : NULL;
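/* Default configuration callback: temporarily stops the event device,
 * reconfigures it with one additional event port for the adapter, sets up
 * that port using the caller supplied rte_event_port_conf and restarts the
 * device.
 */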
1467 rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
1468 struct rte_event_eth_rx_adapter_conf *conf, void *arg)
1471 struct rte_eventdev *dev;
1472 struct rte_event_dev_config dev_conf;
1475 struct rte_event_port_conf *port_conf = arg;
1476 struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
1478 dev = &rte_eventdevs[rx_adapter->eventdev_id];
1479 dev_conf = dev->data->dev_conf;
1481 started = dev->data->dev_started;
1483 rte_event_dev_stop(dev_id);
1484 port_id = dev_conf.nb_event_ports;
1485 dev_conf.nb_event_ports += 1;
1486 ret = rte_event_dev_configure(dev_id, &dev_conf);
1488 RTE_EDEV_LOG_ERR("failed to configure event dev %u\n",
1491 if (rte_event_dev_start(dev_id))
1497 ret = rte_event_port_setup(dev_id, port_id, port_conf);
1499 RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
1504 conf->event_port_id = port_id;
1505 conf->max_nb_rx = 128;
1507 ret = rte_event_dev_start(dev_id);
1508 rx_adapter->default_cb_arg = 1;
1513 rxa_epoll_create1(void)
1517 fd = epoll_create1(EPOLL_CLOEXEC);
1518 return fd < 0 ? -errno : fd;
1525 rxa_init_epd(struct event_eth_rx_adapter *rx_adapter)
1527 if (rx_adapter->epd != INIT_FD)
1530 rx_adapter->epd = rxa_epoll_create1();
1531 if (rx_adapter->epd < 0) {
1532 int err = rx_adapter->epd;
1533 rx_adapter->epd = INIT_FD;
1534 RTE_EDEV_LOG_ERR("epoll_create1() failed, err %d", err);
1542 rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
1545 char thread_name[RTE_MAX_THREAD_NAME_LEN];
1547 if (rx_adapter->intr_ring)
1550 rx_adapter->intr_ring = rte_ring_create("intr_ring",
1551 RTE_EVENT_ETH_INTR_RING_SIZE,
1552 rte_socket_id(), 0);
1553 if (!rx_adapter->intr_ring)
1556 rx_adapter->epoll_events = rte_zmalloc_socket(rx_adapter->mem_name,
1557 RTE_EVENT_ETH_INTR_RING_SIZE *
1558 sizeof(struct rte_epoll_event),
1559 RTE_CACHE_LINE_SIZE,
1560 rx_adapter->socket_id);
1561 if (!rx_adapter->epoll_events) {
1566 rte_spinlock_init(&rx_adapter->intr_ring_lock);
1568 snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
1569 "rx-intr-thread-%d", rx_adapter->id);
1571 err = rte_ctrl_thread_create(&rx_adapter->rx_intr_thread, thread_name,
1572 NULL, rxa_intr_thread, rx_adapter);
1576 RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d\n", err);
1577 rte_free(rx_adapter->epoll_events);
1579 rte_ring_free(rx_adapter->intr_ring);
1580 rx_adapter->intr_ring = NULL;
1581 rx_adapter->epoll_events = NULL;
1586 rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
1590 err = pthread_cancel(rx_adapter->rx_intr_thread);
1592 RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d\n",
1595 err = pthread_join(rx_adapter->rx_intr_thread, NULL);
1597 RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d\n", err);
1599 rte_free(rx_adapter->epoll_events);
1600 rte_ring_free(rx_adapter->intr_ring);
1601 rx_adapter->intr_ring = NULL;
1602 rx_adapter->epoll_events = NULL;
1607 rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter)
1611 if (rx_adapter->num_rx_intr == 0)
1614 ret = rxa_destroy_intr_thread(rx_adapter);
1618 close(rx_adapter->epd);
1619 rx_adapter->epd = INIT_FD;
1625 rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter,
1626 struct eth_device_info *dev_info, uint16_t rx_queue_id)
1629 uint16_t eth_dev_id = dev_info->dev->data->port_id;
1630 int sintr = rxa_shared_intr(dev_info, rx_queue_id);
1632 err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
1634 RTE_EDEV_LOG_ERR("Could not disable interrupt for Rx queue %u",
1639 err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
1644 RTE_EDEV_LOG_ERR("Interrupt event deletion failed %d", err);
1647 dev_info->rx_queue[rx_queue_id].intr_enabled = 0;
1649 dev_info->shared_intr_enabled = 0;
1654 rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
1655 struct eth_device_info *dev_info, int rx_queue_id)
1661 if (dev_info->nb_rx_intr == 0)
1665 if (rx_queue_id == -1) {
1666 s = dev_info->nb_shared_intr;
1667 for (i = 0; i < dev_info->nb_rx_intr; i++) {
1671 q = dev_info->intr_queue[i];
1672 sintr = rxa_shared_intr(dev_info, q);
1675 if (!sintr || s == 0) {
1677 err = rxa_disable_intr(rx_adapter, dev_info,
1681 rxa_intr_ring_del_entries(rx_adapter, dev_info,
1686 if (!rxa_intr_queue(dev_info, rx_queue_id))
1688 if (!rxa_shared_intr(dev_info, rx_queue_id) ||
1689 dev_info->nb_shared_intr == 1) {
1690 err = rxa_disable_intr(rx_adapter, dev_info,
1694 rxa_intr_ring_del_entries(rx_adapter, dev_info,
1698 for (i = 0; i < dev_info->nb_rx_intr; i++) {
1699 if (dev_info->intr_queue[i] == rx_queue_id) {
1700 for (; i < dev_info->nb_rx_intr - 1; i++)
1701 dev_info->intr_queue[i] =
1702 dev_info->intr_queue[i + 1];
1712 rxa_config_intr(struct event_eth_rx_adapter *rx_adapter,
1713 struct eth_device_info *dev_info, uint16_t rx_queue_id)
1716 uint16_t eth_dev_id = dev_info->dev->data->port_id;
1717 union queue_data qd;
1719 uint16_t *intr_queue;
1720 int sintr = rxa_shared_intr(dev_info, rx_queue_id);
1722 if (rxa_intr_queue(dev_info, rx_queue_id))
1725 intr_queue = dev_info->intr_queue;
1726 if (dev_info->intr_queue == NULL) {
1728 dev_info->dev->data->nb_rx_queues * sizeof(uint16_t);
1729 dev_info->intr_queue =
1731 rx_adapter->mem_name,
1734 rx_adapter->socket_id);
1735 if (dev_info->intr_queue == NULL)
1739 init_fd = rx_adapter->epd;
1740 err = rxa_init_epd(rx_adapter);
1742 goto err_free_queue;
1744 qd.port = eth_dev_id;
1745 qd.queue = rx_queue_id;
1747 err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
1752 RTE_EDEV_LOG_ERR("Failed to add interrupt event for"
1753 " Rx Queue %u err %d", rx_queue_id, err);
1757 err = rte_eth_dev_rx_intr_enable(eth_dev_id, rx_queue_id);
1759 RTE_EDEV_LOG_ERR("Could not enable interrupt for"
1760 " Rx Queue %u err %d", rx_queue_id, err);
1765 err = rxa_create_intr_thread(rx_adapter);
1768 dev_info->shared_intr_enabled = 1;
1770 dev_info->rx_queue[rx_queue_id].intr_enabled = 1;
1775 err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
1777 RTE_EDEV_LOG_ERR("Could not disable interrupt for"
1778 " Rx Queue %u err %d", rx_queue_id, err);
1780 err1 = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
1785 RTE_EDEV_LOG_ERR("Could not delete event for"
1786 " Rx Queue %u err %d", rx_queue_id, err1);
1789 if (init_fd == INIT_FD) {
1790 close(rx_adapter->epd);
1791 rx_adapter->epd = INIT_FD;
1794 if (intr_queue == NULL)
1795 rte_free(dev_info->intr_queue);
1801 rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
1802 struct eth_device_info *dev_info, int rx_queue_id)
1807 int shared_done = (dev_info->nb_shared_intr > 0);
1809 if (rx_queue_id != -1) {
1810 if (rxa_shared_intr(dev_info, rx_queue_id) && shared_done)
1812 return rxa_config_intr(rx_adapter, dev_info, rx_queue_id);
1816 for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++) {
1818 if (rxa_shared_intr(dev_info, i) && shared_done)
1821 err = rxa_config_intr(rx_adapter, dev_info, i);
1823 shared_done = err == 0 && rxa_shared_intr(dev_info, i);
1826 dev_info->shared_intr_enabled = 1;
1835 shared_done = (dev_info->nb_shared_intr > 0);
1836 for (j = 0; j < i; j++) {
1837 if (rxa_intr_queue(dev_info, j))
1839 if (rxa_shared_intr(dev_info, j) && si != j)
1841 err = rxa_disable_intr(rx_adapter, dev_info, j);
1851 rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
1854 struct rte_service_spec service;
1855 struct rte_event_eth_rx_adapter_conf rx_adapter_conf;
1857 if (rx_adapter->service_inited)
1860 memset(&service, 0, sizeof(service));
1861 snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN,
1862 "rte_event_eth_rx_adapter_%d", id);
1863 service.socket_id = rx_adapter->socket_id;
1864 service.callback = rxa_service_func;
1865 service.callback_userdata = rx_adapter;
1866 /* Service function handles locking for queue add/del updates */
1867 service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
1868 ret = rte_service_component_register(&service, &rx_adapter->service_id);
1870 RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
1875 ret = rx_adapter->conf_cb(id, rx_adapter->eventdev_id,
1876 &rx_adapter_conf, rx_adapter->conf_arg);
1878 RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
1882 rx_adapter->event_port_id = rx_adapter_conf.event_port_id;
1883 rx_adapter->max_nb_rx = rx_adapter_conf.max_nb_rx;
1884 rx_adapter->service_inited = 1;
1885 rx_adapter->epd = INIT_FD;
1889 rte_service_component_unregister(rx_adapter->service_id);
1894 rxa_update_queue(struct event_eth_rx_adapter *rx_adapter,
1895 struct eth_device_info *dev_info, int32_t rx_queue_id,
1898 struct eth_rx_queue_info *queue_info;
1902 if (dev_info->rx_queue == NULL)
1905 if (rx_queue_id == -1) {
1906 for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
1907 rxa_update_queue(rx_adapter, dev_info, i, add);
1909 queue_info = &dev_info->rx_queue[rx_queue_id];
1910 enabled = queue_info->queue_enabled;
1912 rx_adapter->nb_queues += !enabled;
1913 dev_info->nb_dev_queues += !enabled;
1915 rx_adapter->nb_queues -= enabled;
1916 dev_info->nb_dev_queues -= enabled;
1918 queue_info->queue_enabled = !!add;
1923 rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
1924 uint64_t vector_ns, struct rte_mempool *mp, uint32_t qid,
1927 #define NSEC2TICK(__ns, __freq) (((__ns) * (__freq)) / 1E9)
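/* Illustrative conversion: with a 2 GHz timer (rte_get_timer_hz() == 2e9),
 * a 100 us (1e5 ns) vector timeout becomes 200000 ticks.
 */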
1928 struct eth_rx_vector_data *vector_data;
1931 vector_data = &queue_info->vector_data;
1932 vector_data->max_vector_count = vector_count;
1933 vector_data->port = port_id;
1934 vector_data->queue = qid;
1935 vector_data->vector_pool = mp;
1936 vector_data->vector_timeout_ticks =
1937 NSEC2TICK(vector_ns, rte_get_timer_hz());
1938 vector_data->ts = 0;
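/* If the application did not supply a flow id (the low 20 bits of the event
 * are zero), synthesize one from the queue id and port id below.
 */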
1939 flow_id = queue_info->event & 0xFFFFF;
1941 flow_id == 0 ? (qid & 0xFFF) | (port_id & 0xFF) << 12 : flow_id;
1942 vector_data->event = (queue_info->event & ~0xFFFFF) | flow_id;
1946 rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
1947 struct eth_device_info *dev_info, int32_t rx_queue_id)
1949 struct eth_rx_vector_data *vec;
1955 if (rx_adapter->nb_queues == 0)
1958 if (rx_queue_id == -1) {
1959 uint16_t nb_rx_queues;
1962 nb_rx_queues = dev_info->dev->data->nb_rx_queues;
1963 for (i = 0; i < nb_rx_queues; i++)
1964 rxa_sw_del(rx_adapter, dev_info, i);
1968 /* Push all the partial event vectors to event device. */
1969 TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {
1970 if (vec->queue != rx_queue_id)
1972 rxa_vector_expire(vec, rx_adapter);
1973 TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
1976 pollq = rxa_polled_queue(dev_info, rx_queue_id);
1977 intrq = rxa_intr_queue(dev_info, rx_queue_id);
1978 sintrq = rxa_shared_intr(dev_info, rx_queue_id);
1979 rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 0);
1980 rx_adapter->num_rx_polled -= pollq;
1981 dev_info->nb_rx_poll -= pollq;
1982 rx_adapter->num_rx_intr -= intrq;
1983 dev_info->nb_rx_intr -= intrq;
1984 dev_info->nb_shared_intr -= intrq && sintrq;
1985 if (rx_adapter->use_queue_event_buf) {
1986 struct eth_event_enqueue_buffer *event_buf =
1987 dev_info->rx_queue[rx_queue_id].event_buf;
1988 struct rte_event_eth_rx_adapter_stats *stats =
1989 dev_info->rx_queue[rx_queue_id].stats;
1990 rte_free(event_buf->events);
1991 rte_free(event_buf);
1993 dev_info->rx_queue[rx_queue_id].event_buf = NULL;
1994 dev_info->rx_queue[rx_queue_id].stats = NULL;
1999 rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
2000 struct eth_device_info *dev_info, int32_t rx_queue_id,
2001 const struct rte_event_eth_rx_adapter_queue_conf *conf)
2003 struct eth_rx_queue_info *queue_info;
2004 const struct rte_event *ev = &conf->ev;
2008 struct rte_event *qi_ev;
2009 struct eth_event_enqueue_buffer *new_rx_buf = NULL;
2010 struct rte_event_eth_rx_adapter_stats *stats = NULL;
2011 uint16_t eth_dev_id = dev_info->dev->data->port_id;
2014 if (rx_queue_id == -1) {
2015 uint16_t nb_rx_queues;
2018 nb_rx_queues = dev_info->dev->data->nb_rx_queues;
2019 for (i = 0; i < nb_rx_queues; i++) {
2020 ret = rxa_add_queue(rx_adapter, dev_info, i, conf);
2027 pollq = rxa_polled_queue(dev_info, rx_queue_id);
2028 intrq = rxa_intr_queue(dev_info, rx_queue_id);
2029 sintrq = rxa_shared_intr(dev_info, rx_queue_id);
2031 queue_info = &dev_info->rx_queue[rx_queue_id];
2032 queue_info->wt = conf->servicing_weight;
2034 qi_ev = (struct rte_event *)&queue_info->event;
2035 qi_ev->event = ev->event;
2036 qi_ev->op = RTE_EVENT_OP_NEW;
2037 qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
2038 qi_ev->sub_event_type = 0;
2040 if (conf->rx_queue_flags &
2041 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {
2042 queue_info->flow_id_mask = ~0;
2046 if (conf->rx_queue_flags &
2047 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
2048 queue_info->ena_vector = 1;
2049 qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
2050 rxa_set_vector_data(queue_info, conf->vector_sz,
2051 conf->vector_timeout_ns, conf->vector_mp,
2052 rx_queue_id, dev_info->dev->data->port_id);
2053 rx_adapter->ena_vector = 1;
2054 rx_adapter->vector_tmo_ticks =
2055 rx_adapter->vector_tmo_ticks ?
2056 RTE_MIN(queue_info->vector_data
2057 .vector_timeout_ticks >>
2059 rx_adapter->vector_tmo_ticks) :
2060 queue_info->vector_data.vector_timeout_ticks >>
2064 rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
2065 if (rxa_polled_queue(dev_info, rx_queue_id)) {
2066 rx_adapter->num_rx_polled += !pollq;
2067 dev_info->nb_rx_poll += !pollq;
2068 rx_adapter->num_rx_intr -= intrq;
2069 dev_info->nb_rx_intr -= intrq;
2070 dev_info->nb_shared_intr -= intrq && sintrq;
2073 if (rxa_intr_queue(dev_info, rx_queue_id)) {
2074 rx_adapter->num_rx_polled -= pollq;
2075 dev_info->nb_rx_poll -= pollq;
2076 rx_adapter->num_rx_intr += !intrq;
2077 dev_info->nb_rx_intr += !intrq;
2078 dev_info->nb_shared_intr += !intrq && sintrq;
2079 if (dev_info->nb_shared_intr == 1) {
2080 if (dev_info->multi_intr_cap)
2081 dev_info->next_q_idx =
2082 RTE_MAX_RXTX_INTR_VEC_ID - 1;
2084 dev_info->next_q_idx = 0;
2088 if (!rx_adapter->use_queue_event_buf)
2091 new_rx_buf = rte_zmalloc_socket("rx_buffer_meta",
2092 sizeof(*new_rx_buf), 0,
2093 rte_eth_dev_socket_id(eth_dev_id));
2094 if (new_rx_buf == NULL) {
2095 RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for "
2096 "dev_id: %d queue_id: %d",
2097 eth_dev_id, rx_queue_id);
2101 new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE);
2102 new_rx_buf->events_size += (2 * BATCH_SIZE);
2103 new_rx_buf->events = rte_zmalloc_socket("rx_buffer",
2104 sizeof(struct rte_event) *
2105 new_rx_buf->events_size, 0,
2106 rte_eth_dev_socket_id(eth_dev_id));
2107 if (new_rx_buf->events == NULL) {
2108 rte_free(new_rx_buf);
2109 RTE_EDEV_LOG_ERR("Failed to allocate event buffer for "
2110 "dev_id: %d queue_id: %d",
2111 eth_dev_id, rx_queue_id);
2115 queue_info->event_buf = new_rx_buf;
2117 /* Allocate storage for adapter queue stats */
2118 stats = rte_zmalloc_socket("rx_queue_stats",
2120 rte_eth_dev_socket_id(eth_dev_id));
2121 if (stats == NULL) {
2122 rte_free(new_rx_buf->events);
2123 rte_free(new_rx_buf);
2124 RTE_EDEV_LOG_ERR("Failed to allocate stats storage for"
2125 " dev_id: %d queue_id: %d",
2126 eth_dev_id, rx_queue_id);
2130 queue_info->stats = stats;
2136 rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
2138 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2140 struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
2141 struct rte_event_eth_rx_adapter_queue_conf temp_conf;
2143 struct eth_rx_poll_entry *rx_poll;
2144 struct eth_rx_queue_info *rx_queue;
2146 uint16_t nb_rx_queues;
2147 uint32_t nb_rx_poll, nb_wrr;
2148 uint32_t nb_rx_intr;
2152 if (queue_conf->servicing_weight == 0) {
2153 struct rte_eth_dev_data *data = dev_info->dev->data;
2155 temp_conf = *queue_conf;
2156 if (!data->dev_conf.intr_conf.rxq) {
2157 /* If Rx interrupts are disabled set wt = 1 */
2158 temp_conf.servicing_weight = 1;
2160 queue_conf = &temp_conf;
2162 if (queue_conf->servicing_weight == 0 &&
2163 rx_adapter->use_queue_event_buf) {
2165 RTE_EDEV_LOG_ERR("Use of queue level event buffer "
2166 "not supported for interrupt queues "
2167 "dev_id: %d queue_id: %d",
2168 eth_dev_id, rx_queue_id);
2173 nb_rx_queues = dev_info->dev->data->nb_rx_queues;
2174 rx_queue = dev_info->rx_queue;
2175 wt = queue_conf->servicing_weight;
2177 if (dev_info->rx_queue == NULL) {
2178 dev_info->rx_queue =
2179 rte_zmalloc_socket(rx_adapter->mem_name,
2181 sizeof(struct eth_rx_queue_info), 0,
2182 rx_adapter->socket_id);
2183 if (dev_info->rx_queue == NULL)
2189 rxa_calc_nb_post_add(rx_adapter, dev_info, rx_queue_id,
2190 queue_conf->servicing_weight,
2191 &nb_rx_poll, &nb_rx_intr, &nb_wrr);
2193 if (dev_info->dev->intr_handle)
2194 dev_info->multi_intr_cap =
2195 rte_intr_cap_multiple(dev_info->dev->intr_handle);
2197 ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
2200 goto err_free_rxqueue;
2203 num_intr_vec = rxa_nb_intr_vect(dev_info, rx_queue_id, 1);
2205 ret = rxa_intr_ring_check_avail(rx_adapter, num_intr_vec);
2207 goto err_free_rxqueue;
2209 ret = rxa_add_intr_queue(rx_adapter, dev_info, rx_queue_id);
2211 goto err_free_rxqueue;
2215 if (rx_adapter->num_rx_intr > nb_rx_intr) {
2216 num_intr_vec = rxa_nb_intr_vect(dev_info,
2218 /* interrupt based queues are being converted to
2219 * poll mode queues, delete the interrupt configuration
2222 ret = rxa_del_intr_queue(rx_adapter,
2223 dev_info, rx_queue_id);
2225 goto err_free_rxqueue;
2229 if (nb_rx_intr == 0) {
2230 ret = rxa_free_intr_resources(rx_adapter);
2232 goto err_free_rxqueue;
2238 if (rx_queue_id == -1) {
2239 for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
2240 dev_info->intr_queue[i] = i;
2242 if (!rxa_intr_queue(dev_info, rx_queue_id))
2243 dev_info->intr_queue[nb_rx_intr - 1] =
2250 ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
2252 goto err_free_rxqueue;
2253 rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
2255 rte_free(rx_adapter->eth_rx_poll);
2256 rte_free(rx_adapter->wrr_sched);
2258 rx_adapter->eth_rx_poll = rx_poll;
2259 rx_adapter->wrr_sched = rx_wrr;
2260 rx_adapter->wrr_len = nb_wrr;
2261 rx_adapter->num_intr_vec += num_intr_vec;
2265 if (rx_queue == NULL) {
2266 rte_free(dev_info->rx_queue);
2267 dev_info->rx_queue = NULL;
2277 rxa_ctrl(uint8_t id, int start)
2279 struct event_eth_rx_adapter *rx_adapter;
2280 struct rte_eventdev *dev;
2281 struct eth_device_info *dev_info;
2283 int use_service = 0;
2286 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2287 rx_adapter = rxa_id_to_adapter(id);
2288 if (rx_adapter == NULL)
2291 dev = &rte_eventdevs[rx_adapter->eventdev_id];
2293 RTE_ETH_FOREACH_DEV(i) {
2294 dev_info = &rx_adapter->eth_devices[i];
2295 /* if starting, check the number of device queues added */
2296 if (start && !dev_info->nb_dev_queues)
2298 /* if stopping, check whether the device has been started */
2299 if (stop && !dev_info->dev_rx_started)
2301 use_service |= !dev_info->internal_event_port;
2302 dev_info->dev_rx_started = start;
2303 if (dev_info->internal_event_port == 0)
2305 start ? (*dev->dev_ops->eth_rx_adapter_start)(dev,
2306 &rte_eth_devices[i]) :
2307 (*dev->dev_ops->eth_rx_adapter_stop)(dev,
2308 &rte_eth_devices[i]);
2312 rte_spinlock_lock(&rx_adapter->rx_lock);
2313 rx_adapter->rxa_started = start;
2314 rte_service_runstate_set(rx_adapter->service_id, start);
2315 rte_spinlock_unlock(&rx_adapter->rx_lock);
2322 rxa_create(uint8_t id, uint8_t dev_id,
2323 struct rte_event_eth_rx_adapter_params *rxa_params,
2324 rte_event_eth_rx_adapter_conf_cb conf_cb,
2327 struct event_eth_rx_adapter *rx_adapter;
2328 struct eth_event_enqueue_buffer *buf;
2329 struct rte_event *events;
2333 char mem_name[ETH_RX_ADAPTER_SERVICE_NAME_LEN];
2334 const uint8_t default_rss_key[] = {
2335 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
2336 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
2337 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
2338 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
2339 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
2342 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2343 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
2345 if (conf_cb == NULL)
2348 if (event_eth_rx_adapter == NULL) {
2349 ret = rte_event_eth_rx_adapter_init();
2354 rx_adapter = rxa_id_to_adapter(id);
2355 if (rx_adapter != NULL) {
2356 RTE_EDEV_LOG_ERR("Eth Rx adapter exists id = %" PRIu8, id);
2360 socket_id = rte_event_dev_socket_id(dev_id);
2361 snprintf(mem_name, ETH_RX_ADAPTER_MEM_NAME_LEN,
2362 "rte_event_eth_rx_adapter_%d",
2365 rx_adapter = rte_zmalloc_socket(mem_name, sizeof(*rx_adapter),
2366 RTE_CACHE_LINE_SIZE, socket_id);
2367 if (rx_adapter == NULL) {
2368 RTE_EDEV_LOG_ERR("failed to get mem for rx adapter");
2372 rx_adapter->eventdev_id = dev_id;
2373 rx_adapter->socket_id = socket_id;
2374 rx_adapter->conf_cb = conf_cb;
2375 rx_adapter->conf_arg = conf_arg;
2376 rx_adapter->id = id;
2377 TAILQ_INIT(&rx_adapter->vector_list);
2378 strcpy(rx_adapter->mem_name, mem_name);
2379 rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
2381 sizeof(struct eth_device_info), 0,
2383 rte_convert_rss_key((const uint32_t *)default_rss_key,
2384 (uint32_t *)rx_adapter->rss_key_be,
2385 RTE_DIM(default_rss_key));
2387 if (rx_adapter->eth_devices == NULL) {
2388 RTE_EDEV_LOG_ERR("failed to get mem for eth devices\n");
2389 rte_free(rx_adapter);
2393 rte_spinlock_init(&rx_adapter->rx_lock);
2395 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
2396 rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
2398 /* Rx adapter event buffer allocation */
2399 rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf;
2401 if (!rx_adapter->use_queue_event_buf) {
2402 buf = &rx_adapter->event_enqueue_buffer;
2403 buf->events_size = rxa_params->event_buf_size;
2405 events = rte_zmalloc_socket(rx_adapter->mem_name,
2406 buf->events_size * sizeof(*events),
2408 if (events == NULL) {
2409 RTE_EDEV_LOG_ERR("Failed to allocate memory "
2410 "for adapter event buffer");
2411 rte_free(rx_adapter->eth_devices);
2412 rte_free(rx_adapter);
2416 rx_adapter->event_enqueue_buffer.events = events;
2419 event_eth_rx_adapter[id] = rx_adapter;
2421 if (conf_cb == rxa_default_conf_cb)
2422 rx_adapter->default_cb_arg = 1;
2424 if (rte_mbuf_dyn_rx_timestamp_register(
2425 &event_eth_rx_timestamp_dynfield_offset,
2426 &event_eth_rx_timestamp_dynflag) != 0) {
2427 RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf\n");
2431 rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb,
2437 rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
2438 rte_event_eth_rx_adapter_conf_cb conf_cb,
2441 struct rte_event_eth_rx_adapter_params rxa_params = {0};
2443 /* use default values for adapter params */
2444 rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE;
2445 rxa_params.use_queue_event_buf = false;
2447 return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg);
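/* Illustrative usage sketch (not part of this file); id, dev_id, eth_port and
 * port_conf are assumed to be set up by the application:
 *
 *	struct rte_event_eth_rx_adapter_queue_conf qconf = {
 *		.ev.queue_id = 0,
 *		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.servicing_weight = 1,
 *	};
 *	rte_event_eth_rx_adapter_create(id, dev_id, &port_conf);
 *	rte_event_eth_rx_adapter_queue_add(id, eth_port, -1, &qconf);
 *	rte_event_eth_rx_adapter_start(id);
 */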
2451 rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id,
2452 struct rte_event_port_conf *port_config,
2453 struct rte_event_eth_rx_adapter_params *rxa_params)
2455 struct rte_event_port_conf *pc;
2457 struct rte_event_eth_rx_adapter_params temp_params = {0};
2459 if (port_config == NULL)
2462 if (rxa_params == NULL) {
2463 /* use default values if rxa_params is NULL */
2464 rxa_params = &temp_params;
2465 rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE;
2466 rxa_params->use_queue_event_buf = false;
2467 } else if ((!rxa_params->use_queue_event_buf &&
2468 rxa_params->event_buf_size == 0) ||
2469 (rxa_params->use_queue_event_buf &&
2470 rxa_params->event_buf_size != 0)) {
2471 RTE_EDEV_LOG_ERR("Invalid adapter params\n");
2473 } else if (!rxa_params->use_queue_event_buf) {
2474 /* Adjust the event buffer size by BATCH_SIZE, the unit used for fetching
2475 * packets from NIC Rx queues, to get full buffer utilization
2476 * and prevent unnecessary rollovers.
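 *
 * For example, assuming the usual BATCH_SIZE of 32, a requested size of 1000
 * becomes RTE_ALIGN(1000, 32) + 2 * 32 = 1024 + 64 = 1088 events.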
2479 rxa_params->event_buf_size =
2480 RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE);
2481 rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE);
2484 pc = rte_malloc(NULL, sizeof(*pc), 0);
2490 ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc);
2498 rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
2499 struct rte_event_port_conf *port_config)
2501 struct rte_event_port_conf *pc;
2504 if (port_config == NULL)
2507 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2509 pc = rte_malloc(NULL, sizeof(*pc), 0);
2514 ret = rte_event_eth_rx_adapter_create_ext(id, dev_id,
2515 rxa_default_conf_cb,
2523 rte_event_eth_rx_adapter_free(uint8_t id)
2525 struct event_eth_rx_adapter *rx_adapter;
2527 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2529 rx_adapter = rxa_id_to_adapter(id);
2530 if (rx_adapter == NULL)
2533 if (rx_adapter->nb_queues) {
2534 RTE_EDEV_LOG_ERR("%" PRIu16 " Rx queues not deleted",
2535 rx_adapter->nb_queues);
2539 if (rx_adapter->default_cb_arg)
2540 rte_free(rx_adapter->conf_arg);
2541 rte_free(rx_adapter->eth_devices);
2542 if (!rx_adapter->use_queue_event_buf)
2543 rte_free(rx_adapter->event_enqueue_buffer.events);
2544 rte_free(rx_adapter);
2545 event_eth_rx_adapter[id] = NULL;
2547 rte_eventdev_trace_eth_rx_adapter_free(id);
2552 rte_event_eth_rx_adapter_queue_add(uint8_t id,
2553 uint16_t eth_dev_id,
2554 int32_t rx_queue_id,
2555 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2559 struct event_eth_rx_adapter *rx_adapter;
2560 struct rte_eventdev *dev;
2561 struct eth_device_info *dev_info;
2562 struct rte_event_eth_rx_adapter_vector_limits limits;
2564 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2565 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
2567 rx_adapter = rxa_id_to_adapter(id);
2568 if ((rx_adapter == NULL) || (queue_conf == NULL))
2571 dev = &rte_eventdevs[rx_adapter->eventdev_id];
2572 ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
2576 RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
2577 " eth port %" PRIu16, id, eth_dev_id);
2581 if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) == 0
2582 && (queue_conf->rx_queue_flags &
2583 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
2584 RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
2585 " eth port: %" PRIu16 " adapter id: %" PRIu8,
2590 if (queue_conf->rx_queue_flags &
2591 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
2593 if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0) {
2594 RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
2595 " eth port: %" PRIu16
2596 " adapter id: %" PRIu8,
2601 ret = rte_event_eth_rx_adapter_vector_limits_get(
2602 rx_adapter->eventdev_id, eth_dev_id, &limits);
2604 RTE_EDEV_LOG_ERR("Failed to get event device vector limits,"
2605 " eth port: %" PRIu16
2606 " adapter id: %" PRIu8,
2610 if (queue_conf->vector_sz < limits.min_sz ||
2611 queue_conf->vector_sz > limits.max_sz ||
2612 queue_conf->vector_timeout_ns < limits.min_timeout_ns ||
2613 queue_conf->vector_timeout_ns > limits.max_timeout_ns ||
2614 queue_conf->vector_mp == NULL) {
2615 RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
2616 " eth port: %" PRIu16
2617 " adapter id: %" PRIu8,
2621 if (queue_conf->vector_mp->elt_size <
2622 (sizeof(struct rte_event_vector) +
2623 (sizeof(uintptr_t) * queue_conf->vector_sz))) {
2624 RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
2625 " eth port: %" PRIu16
2626 " adapter id: %" PRIu8,
2632 if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
2633 (rx_queue_id != -1)) {
2634 RTE_EDEV_LOG_ERR("Rx queues can only be connected to a single "
2635 "event queue, eth port: %" PRIu16 " adapter id: %"
2636 PRIu8, eth_dev_id, id);
2640 if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
2641 rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
2642 RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
2643 (uint16_t)rx_queue_id);
2647 if ((rx_adapter->use_queue_event_buf &&
2648 queue_conf->event_buf_size == 0) ||
2649 (!rx_adapter->use_queue_event_buf &&
2650 queue_conf->event_buf_size != 0)) {
2651 RTE_EDEV_LOG_ERR("Invalid event buffer size for the queue");
2655 dev_info = &rx_adapter->eth_devices[eth_dev_id];
2657 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
2658 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_add,
2660 if (dev_info->rx_queue == NULL) {
2661 dev_info->rx_queue =
2662 rte_zmalloc_socket(rx_adapter->mem_name,
2663 dev_info->dev->data->nb_rx_queues *
2664 sizeof(struct eth_rx_queue_info), 0,
2665 rx_adapter->socket_id);
2666 if (dev_info->rx_queue == NULL)
2670 ret = (*dev->dev_ops->eth_rx_adapter_queue_add)(dev,
2671 &rte_eth_devices[eth_dev_id],
2672 rx_queue_id, queue_conf);
2674 dev_info->internal_event_port = 1;
2675 rxa_update_queue(rx_adapter,
2676 &rx_adapter->eth_devices[eth_dev_id],
2681 rte_spinlock_lock(&rx_adapter->rx_lock);
2682 dev_info->internal_event_port = 0;
2683 ret = rxa_init_service(rx_adapter, id);
2685 uint32_t service_id = rx_adapter->service_id;
2686 ret = rxa_sw_add(rx_adapter, eth_dev_id, rx_queue_id,
2688 rte_service_component_runstate_set(service_id,
2689 rxa_sw_adapter_queue_count(rx_adapter));
2691 rte_spinlock_unlock(&rx_adapter->rx_lock);
2694 rte_eventdev_trace_eth_rx_adapter_queue_add(id, eth_dev_id,
2695 rx_queue_id, queue_conf, ret);
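/*
 * A usage sketch for the vector checks above, assuming the eventdev
 * reports RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR; names and sizes are
 * illustrative. rte_event_vector_pool_create() sizes each mempool element
 * to hold sizeof(struct rte_event_vector) plus vector_sz pointers, which
 * satisfies the elt_size validation performed here:
 *
 *   struct rte_event_eth_rx_adapter_queue_conf qconf = {0};
 *
 *   qconf.ev.queue_id = ev_queue_id;
 *   qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *   qconf.servicing_weight = 1;
 *   qconf.rx_queue_flags = RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
 *   qconf.vector_sz = 64;
 *   qconf.vector_timeout_ns = 100 * 1000;
 *   qconf.vector_mp = rte_event_vector_pool_create("rxa_vec_pool", 8192,
 *                             0, qconf.vector_sz, rte_socket_id());
 *
 *   ret = rte_event_eth_rx_adapter_queue_add(id, eth_dev_id, -1, &qconf);
 */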
2703 rxa_sw_vector_limits(struct rte_event_eth_rx_adapter_vector_limits *limits)
2705 limits->max_sz = MAX_VECTOR_SIZE;
2706 limits->min_sz = MIN_VECTOR_SIZE;
2707 limits->max_timeout_ns = MAX_VECTOR_NS;
2708 limits->min_timeout_ns = MIN_VECTOR_NS;
2714 rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
2715 int32_t rx_queue_id)
2718 struct rte_eventdev *dev;
2719 struct event_eth_rx_adapter *rx_adapter;
2720 struct eth_device_info *dev_info;
2722 uint32_t nb_rx_poll = 0;
2723 uint32_t nb_wrr = 0;
2724 uint32_t nb_rx_intr;
2725 struct eth_rx_poll_entry *rx_poll = NULL;
2726 uint32_t *rx_wrr = NULL;
2729 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2730 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
2732 rx_adapter = rxa_id_to_adapter(id);
2733 if (rx_adapter == NULL)
2736 dev = &rte_eventdevs[rx_adapter->eventdev_id];
2737 ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
2743 if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
2744 rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
2745 RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
2746 (uint16_t)rx_queue_id);
2750 dev_info = &rx_adapter->eth_devices[eth_dev_id];
2752 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
2753 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_del,
2755 ret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev,
2756 &rte_eth_devices[eth_dev_id],
2759 rxa_update_queue(rx_adapter,
2760 &rx_adapter->eth_devices[eth_dev_id],
2763 if (dev_info->nb_dev_queues == 0) {
2764 rte_free(dev_info->rx_queue);
2765 dev_info->rx_queue = NULL;
2769 rxa_calc_nb_post_del(rx_adapter, dev_info, rx_queue_id,
2770 &nb_rx_poll, &nb_rx_intr, &nb_wrr);
2772 ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
2777 rte_spinlock_lock(&rx_adapter->rx_lock);
2780 if (rx_adapter->num_rx_intr > nb_rx_intr) {
2782 num_intr_vec = rxa_nb_intr_vect(dev_info,
2784 ret = rxa_del_intr_queue(rx_adapter, dev_info,
2790 if (nb_rx_intr == 0) {
2791 ret = rxa_free_intr_resources(rx_adapter);
2796 rxa_sw_del(rx_adapter, dev_info, rx_queue_id);
2797 rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
2799 rte_free(rx_adapter->eth_rx_poll);
2800 rte_free(rx_adapter->wrr_sched);
2802 if (nb_rx_intr == 0) {
2803 rte_free(dev_info->intr_queue);
2804 dev_info->intr_queue = NULL;
2807 rx_adapter->eth_rx_poll = rx_poll;
2808 rx_adapter->wrr_sched = rx_wrr;
2809 rx_adapter->wrr_len = nb_wrr;
2811 * reset the next poll start position (wrr_pos) to avoid indexing past
2812 * the end of wrr_sched[] when a queue delete reduces wrr_len
2814 rx_adapter->wrr_pos = 0;
2815 rx_adapter->num_intr_vec += num_intr_vec;
2817 if (dev_info->nb_dev_queues == 0) {
2818 rte_free(dev_info->rx_queue);
2819 dev_info->rx_queue = NULL;
2822 rte_spinlock_unlock(&rx_adapter->rx_lock);
2829 rte_service_component_runstate_set(rx_adapter->service_id,
2830 rxa_sw_adapter_queue_count(rx_adapter));
2833 rte_eventdev_trace_eth_rx_adapter_queue_del(id, eth_dev_id,
2839 rte_event_eth_rx_adapter_vector_limits_get(
2840 uint8_t dev_id, uint16_t eth_port_id,
2841 struct rte_event_eth_rx_adapter_vector_limits *limits)
2843 struct rte_eventdev *dev;
2847 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
2848 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
2853 dev = &rte_eventdevs[dev_id];
2855 ret = rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &cap);
2857 RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
2858 " eth port %" PRIu16,
2859 dev_id, eth_port_id);
2863 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
2864 RTE_FUNC_PTR_OR_ERR_RET(
2865 *dev->dev_ops->eth_rx_adapter_vector_limits_get,
2867 ret = dev->dev_ops->eth_rx_adapter_vector_limits_get(
2868 dev, &rte_eth_devices[eth_port_id], limits);
2870 ret = rxa_sw_vector_limits(limits);
2877 rte_event_eth_rx_adapter_start(uint8_t id)
2879 rte_eventdev_trace_eth_rx_adapter_start(id);
2880 return rxa_ctrl(id, 1);
2884 rte_event_eth_rx_adapter_stop(uint8_t id)
2886 rte_eventdev_trace_eth_rx_adapter_stop(id);
2887 return rxa_ctrl(id, 0);
2891 rxa_queue_stats_reset(struct eth_rx_queue_info *queue_info)
2893 struct rte_event_eth_rx_adapter_stats *q_stats;
2895 q_stats = queue_info->stats;
2896 memset(q_stats, 0, sizeof(*q_stats));
2900 rte_event_eth_rx_adapter_stats_get(uint8_t id,
2901 struct rte_event_eth_rx_adapter_stats *stats)
2903 struct event_eth_rx_adapter *rx_adapter;
2904 struct eth_event_enqueue_buffer *buf;
2905 struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
2906 struct rte_event_eth_rx_adapter_stats dev_stats;
2907 struct rte_eventdev *dev;
2908 struct eth_device_info *dev_info;
2909 struct eth_rx_queue_info *queue_info;
2910 struct rte_event_eth_rx_adapter_stats *q_stats;
2914 if (rxa_memzone_lookup())
2917 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2919 rx_adapter = rxa_id_to_adapter(id);
2920 if (rx_adapter == NULL || stats == NULL)
2923 dev = &rte_eventdevs[rx_adapter->eventdev_id];
2924 memset(stats, 0, sizeof(*stats));
2926 if (rx_adapter->service_inited)
2927 *stats = rx_adapter->stats;
2929 RTE_ETH_FOREACH_DEV(i) {
2930 dev_info = &rx_adapter->eth_devices[i];
2932 if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) {
2934 for (j = 0; j < dev_info->dev->data->nb_rx_queues;
2936 queue_info = &dev_info->rx_queue[j];
2937 if (!queue_info->queue_enabled)
2939 q_stats = queue_info->stats;
2941 stats->rx_packets += q_stats->rx_packets;
2942 stats->rx_poll_count += q_stats->rx_poll_count;
2943 stats->rx_enq_count += q_stats->rx_enq_count;
2944 stats->rx_enq_retry += q_stats->rx_enq_retry;
2945 stats->rx_dropped += q_stats->rx_dropped;
2946 stats->rx_enq_block_cycles +=
2947 q_stats->rx_enq_block_cycles;
2951 if (dev_info->internal_event_port == 0 ||
2952 dev->dev_ops->eth_rx_adapter_stats_get == NULL)
2954 ret = (*dev->dev_ops->eth_rx_adapter_stats_get)(dev,
2955 &rte_eth_devices[i],
2959 dev_stats_sum.rx_packets += dev_stats.rx_packets;
2960 dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
2963 buf = &rx_adapter->event_enqueue_buffer;
2964 stats->rx_packets += dev_stats_sum.rx_packets;
2965 stats->rx_enq_count += dev_stats_sum.rx_enq_count;
2966 stats->rx_event_buf_count = buf->count;
2967 stats->rx_event_buf_size = buf->events_size;
2973 rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
2974 uint16_t eth_dev_id,
2975 uint16_t rx_queue_id,
2976 struct rte_event_eth_rx_adapter_queue_stats *stats)
2978 struct event_eth_rx_adapter *rx_adapter;
2979 struct eth_device_info *dev_info;
2980 struct eth_rx_queue_info *queue_info;
2981 struct eth_event_enqueue_buffer *event_buf;
2982 struct rte_event_eth_rx_adapter_stats *q_stats;
2983 struct rte_eventdev *dev;
2985 if (rxa_memzone_lookup())
2988 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2989 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
2991 rx_adapter = rxa_id_to_adapter(id);
2993 if (rx_adapter == NULL || stats == NULL)
2996 if (!rx_adapter->use_queue_event_buf)
2999 if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3000 RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
3004 dev_info = &rx_adapter->eth_devices[eth_dev_id];
3005 if (dev_info->rx_queue == NULL ||
3006 !dev_info->rx_queue[rx_queue_id].queue_enabled) {
3007 RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
3011 if (dev_info->internal_event_port == 0) {
3012 queue_info = &dev_info->rx_queue[rx_queue_id];
3013 event_buf = queue_info->event_buf;
3014 q_stats = queue_info->stats;
3016 stats->rx_event_buf_count = event_buf->count;
3017 stats->rx_event_buf_size = event_buf->events_size;
3018 stats->rx_packets = q_stats->rx_packets;
3019 stats->rx_poll_count = q_stats->rx_poll_count;
3020 stats->rx_dropped = q_stats->rx_dropped;
3023 dev = &rte_eventdevs[rx_adapter->eventdev_id];
3024 if (dev->dev_ops->eth_rx_adapter_queue_stats_get != NULL) {
3025 return (*dev->dev_ops->eth_rx_adapter_queue_stats_get)(dev,
3026 &rte_eth_devices[eth_dev_id],
3027 rx_queue_id, stats);
3034 rte_event_eth_rx_adapter_stats_reset(uint8_t id)
3036 struct event_eth_rx_adapter *rx_adapter;
3037 struct rte_eventdev *dev;
3038 struct eth_device_info *dev_info;
3039 struct eth_rx_queue_info *queue_info;
3042 if (rxa_memzone_lookup())
3045 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3047 rx_adapter = rxa_id_to_adapter(id);
3048 if (rx_adapter == NULL)
3051 dev = &rte_eventdevs[rx_adapter->eventdev_id];
3053 RTE_ETH_FOREACH_DEV(i) {
3054 dev_info = &rx_adapter->eth_devices[i];
3056 if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) {
3058 for (j = 0; j < dev_info->dev->data->nb_rx_queues;
3060 queue_info = &dev_info->rx_queue[j];
3061 if (!queue_info->queue_enabled)
3063 rxa_queue_stats_reset(queue_info);
3067 if (dev_info->internal_event_port == 0 ||
3068 dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
3070 (*dev->dev_ops->eth_rx_adapter_stats_reset)(dev,
3071 &rte_eth_devices[i]);
3074 memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
3080 rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
3081 uint16_t eth_dev_id,
3082 uint16_t rx_queue_id)
3084 struct event_eth_rx_adapter *rx_adapter;
3085 struct eth_device_info *dev_info;
3086 struct eth_rx_queue_info *queue_info;
3087 struct rte_eventdev *dev;
3089 if (rxa_memzone_lookup())
3092 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3093 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
3095 rx_adapter = rxa_id_to_adapter(id);
3096 if (rx_adapter == NULL)
3099 if (!rx_adapter->use_queue_event_buf)
3102 if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3103 RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
3107 dev_info = &rx_adapter->eth_devices[eth_dev_id];
3109 if (dev_info->rx_queue == NULL ||
3110 !dev_info->rx_queue[rx_queue_id].queue_enabled) {
3111 RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
3115 if (dev_info->internal_event_port == 0) {
3116 queue_info = &dev_info->rx_queue[rx_queue_id];
3117 rxa_queue_stats_reset(queue_info);
3120 dev = &rte_eventdevs[rx_adapter->eventdev_id];
3121 if (dev->dev_ops->eth_rx_adapter_queue_stats_reset != NULL) {
3122 return (*dev->dev_ops->eth_rx_adapter_queue_stats_reset)(dev,
3123 &rte_eth_devices[eth_dev_id],
3131 rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
3133 struct event_eth_rx_adapter *rx_adapter;
3135 if (rxa_memzone_lookup())
3138 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3140 rx_adapter = rxa_id_to_adapter(id);
3141 if (rx_adapter == NULL || service_id == NULL)
3144 if (rx_adapter->service_inited)
3145 *service_id = rx_adapter->service_id;
3147 return rx_adapter->service_inited ? 0 : -ESRCH;
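/*
 * When the adapter runs as a software service (no internal port), the
 * application is expected to map and run that service itself. A sketch,
 * with lcore_id chosen by the application:
 *
 *   uint32_t service_id;
 *
 *   if (rte_event_eth_rx_adapter_service_id_get(id, &service_id) == 0) {
 *           rte_service_lcore_add(lcore_id);
 *           rte_service_map_lcore_set(service_id, lcore_id, 1);
 *           rte_service_runstate_set(service_id, 1);
 *           rte_service_lcore_start(lcore_id);
 *   }
 */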
3151 rte_event_eth_rx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
3153 struct event_eth_rx_adapter *rx_adapter;
3155 if (rxa_memzone_lookup())
3158 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3160 rx_adapter = rxa_id_to_adapter(id);
3161 if (rx_adapter == NULL || event_port_id == NULL)
3164 if (rx_adapter->service_inited)
3165 *event_port_id = rx_adapter->event_port_id;
3167 return rx_adapter->service_inited ? 0 : -ESRCH;
3171 rte_event_eth_rx_adapter_cb_register(uint8_t id,
3172 uint16_t eth_dev_id,
3173 rte_event_eth_rx_adapter_cb_fn cb_fn,
3176 struct event_eth_rx_adapter *rx_adapter;
3177 struct eth_device_info *dev_info;
3181 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3182 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
3184 rx_adapter = rxa_id_to_adapter(id);
3185 if (rx_adapter == NULL)
3188 dev_info = &rx_adapter->eth_devices[eth_dev_id];
3189 if (dev_info->rx_queue == NULL)
3192 ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
3196 RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
3197 " eth port %" PRIu16, id, eth_dev_id);
3201 if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
3202 RTE_EDEV_LOG_ERR("Rx callback not supported for eth port %"
3203 PRIu16, eth_dev_id);
3207 rte_spinlock_lock(&rx_adapter->rx_lock);
3208 dev_info->cb_fn = cb_fn;
3209 dev_info->cb_arg = cb_arg;
3210 rte_spinlock_unlock(&rx_adapter->rx_lock);
3216 rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
3217 uint16_t eth_dev_id,
3218 uint16_t rx_queue_id,
3219 struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
3221 struct rte_eventdev *dev;
3222 struct event_eth_rx_adapter *rx_adapter;
3223 struct eth_device_info *dev_info;
3224 struct eth_rx_queue_info *queue_info;
3225 struct rte_event *qi_ev;
3228 if (rxa_memzone_lookup())
3231 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3232 RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
3234 if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3235 RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
3239 if (queue_conf == NULL) {
3240 RTE_EDEV_LOG_ERR("Rx queue conf struct cannot be NULL");
3244 rx_adapter = rxa_id_to_adapter(id);
3245 if (rx_adapter == NULL)
3248 dev_info = &rx_adapter->eth_devices[eth_dev_id];
3249 if (dev_info->rx_queue == NULL ||
3250 !dev_info->rx_queue[rx_queue_id].queue_enabled) {
3251 RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
3255 queue_info = &dev_info->rx_queue[rx_queue_id];
3256 qi_ev = (struct rte_event *)&queue_info->event;
3258 memset(queue_conf, 0, sizeof(*queue_conf));
3259 queue_conf->rx_queue_flags = 0;
3260 if (queue_info->flow_id_mask != 0)
3261 queue_conf->rx_queue_flags |=
3262 RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
3263 queue_conf->servicing_weight = queue_info->wt;
3265 memcpy(&queue_conf->ev, qi_ev, sizeof(*qi_ev));
3267 dev = &rte_eventdevs[rx_adapter->eventdev_id];
3268 if (dev->dev_ops->eth_rx_adapter_queue_conf_get != NULL) {
3269 ret = (*dev->dev_ops->eth_rx_adapter_queue_conf_get)(dev,
3270 &rte_eth_devices[eth_dev_id],
3279 #define RXA_ADD_DICT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
3282 handle_rxa_stats(const char *cmd __rte_unused,
3284 struct rte_tel_data *d)
3286 uint8_t rx_adapter_id;
3287 struct rte_event_eth_rx_adapter_stats rx_adptr_stats;
3289 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3292 /* Get Rx adapter ID from parameter string */
3293 rx_adapter_id = atoi(params);
3294 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
3296 /* Get Rx adapter stats */
3297 if (rte_event_eth_rx_adapter_stats_get(rx_adapter_id,
3299 RTE_EDEV_LOG_ERR("Failed to get Rx adapter stats");
3303 rte_tel_data_start_dict(d);
3304 rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
3305 RXA_ADD_DICT(rx_adptr_stats, rx_packets);
3306 RXA_ADD_DICT(rx_adptr_stats, rx_poll_count);
3307 RXA_ADD_DICT(rx_adptr_stats, rx_dropped);
3308 RXA_ADD_DICT(rx_adptr_stats, rx_enq_retry);
3309 RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_count);
3310 RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_size);
3311 RXA_ADD_DICT(rx_adptr_stats, rx_enq_count);
3312 RXA_ADD_DICT(rx_adptr_stats, rx_enq_start_ts);
3313 RXA_ADD_DICT(rx_adptr_stats, rx_enq_block_cycles);
3314 RXA_ADD_DICT(rx_adptr_stats, rx_enq_end_ts);
3315 RXA_ADD_DICT(rx_adptr_stats, rx_intr_packets);
3321 handle_rxa_stats_reset(const char *cmd __rte_unused,
3323 struct rte_tel_data *d __rte_unused)
3325 uint8_t rx_adapter_id;
3327 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3330 /* Get Rx adapter ID from parameter string */
3331 rx_adapter_id = atoi(params);
3332 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
3334 /* Reset Rx adapter stats */
3335 if (rte_event_eth_rx_adapter_stats_reset(rx_adapter_id)) {
3336 RTE_EDEV_LOG_ERR("Failed to reset Rx adapter stats");
3344 handle_rxa_get_queue_conf(const char *cmd __rte_unused,
3346 struct rte_tel_data *d)
3348 uint8_t rx_adapter_id;
3349 uint16_t rx_queue_id;
3350 int eth_dev_id, ret = -1;
3351 char *token, *l_params;
3352 struct rte_event_eth_rx_adapter_queue_conf queue_conf;
3354 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3357 /* Get Rx adapter ID from parameter string */
3358 l_params = strdup(params);
3359 if (l_params == NULL)
3361 token = strtok(l_params, ",");
3362 RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
3363 rx_adapter_id = strtoul(token, NULL, 10);
3364 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(rx_adapter_id, -EINVAL);
3366 token = strtok(NULL, ",");
3367 RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
3369 /* Get device ID from parameter string */
3370 eth_dev_id = strtoul(token, NULL, 10);
3371 RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
3373 token = strtok(NULL, ",");
3374 RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
3376 /* Get Rx queue ID from parameter string */
3377 rx_queue_id = strtoul(token, NULL, 10);
3378 if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3379 RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
3384 token = strtok(NULL, "\0");
3386 RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
3387 " telemetry command, ignoring");
3388 /* Parsing parameter finished */
3391 if (rte_event_eth_rx_adapter_queue_conf_get(rx_adapter_id, eth_dev_id,
3392 rx_queue_id, &queue_conf)) {
3393 RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue config");
3397 rte_tel_data_start_dict(d);
3398 rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
3399 rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id);
3400 rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id);
3401 RXA_ADD_DICT(queue_conf, rx_queue_flags);
3402 RXA_ADD_DICT(queue_conf, servicing_weight);
3403 RXA_ADD_DICT(queue_conf.ev, queue_id);
3404 RXA_ADD_DICT(queue_conf.ev, sched_type);
3405 RXA_ADD_DICT(queue_conf.ev, priority);
3406 RXA_ADD_DICT(queue_conf.ev, flow_id);
3416 handle_rxa_get_queue_stats(const char *cmd __rte_unused,
3418 struct rte_tel_data *d)
3420 uint8_t rx_adapter_id;
3421 uint16_t rx_queue_id;
3422 int eth_dev_id, ret = -1;
3423 char *token, *l_params;
3424 struct rte_event_eth_rx_adapter_queue_stats q_stats;
3426 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3429 /* Get Rx adapter ID from parameter string */
3430 l_params = strdup(params);
3431 if (l_params == NULL)
3433 token = strtok(l_params, ",");
3434 RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
3435 rx_adapter_id = strtoul(token, NULL, 10);
3436 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(rx_adapter_id, -EINVAL);
3438 token = strtok(NULL, ",");
3439 RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
3441 /* Get device ID from parameter string */
3442 eth_dev_id = strtoul(token, NULL, 10);
3443 RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
3445 token = strtok(NULL, ",");
3446 RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
3448 /* Get Rx queue ID from parameter string */
3449 rx_queue_id = strtoul(token, NULL, 10);
3450 if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3451 RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
3456 token = strtok(NULL, "\0");
3458 RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
3459 " telemetry command, ignoring");
3460 /* Parsing parameter finished */
3463 if (rte_event_eth_rx_adapter_queue_stats_get(rx_adapter_id, eth_dev_id,
3464 rx_queue_id, &q_stats)) {
3465 RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue stats");
3469 rte_tel_data_start_dict(d);
3470 rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
3471 rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id);
3472 rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id);
3473 RXA_ADD_DICT(q_stats, rx_event_buf_count);
3474 RXA_ADD_DICT(q_stats, rx_event_buf_size);
3475 RXA_ADD_DICT(q_stats, rx_poll_count);
3476 RXA_ADD_DICT(q_stats, rx_packets);
3477 RXA_ADD_DICT(q_stats, rx_dropped);
3487 handle_rxa_queue_stats_reset(const char *cmd __rte_unused,
3489 struct rte_tel_data *d __rte_unused)
3491 uint8_t rx_adapter_id;
3492 uint16_t rx_queue_id;
3493 int eth_dev_id, ret = -1;
3494 char *token, *l_params;
3496 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3499 /* Get Rx adapter ID from parameter string */
3500 l_params = strdup(params);
3501 if (l_params == NULL)
3503 token = strtok(l_params, ",");
3504 RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
3505 rx_adapter_id = strtoul(token, NULL, 10);
3506 RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(rx_adapter_id, -EINVAL);
3508 token = strtok(NULL, ",");
3509 RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
3511 /* Get device ID from parameter string */
3512 eth_dev_id = strtoul(token, NULL, 10);
3513 RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
3515 token = strtok(NULL, ",");
3516 RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
3518 /* Get Rx queue ID from parameter string */
3519 rx_queue_id = strtoul(token, NULL, 10);
3520 if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3521 RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
3526 token = strtok(NULL, "\0");
3528 RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
3529 " telemetry command, ignoring");
3530 /* Parsing parameter finished */
3533 if (rte_event_eth_rx_adapter_queue_stats_reset(rx_adapter_id,
3536 RTE_EDEV_LOG_ERR("Failed to reset Rx adapter queue stats");
3547 RTE_INIT(rxa_init_telemetry)
3549 rte_telemetry_register_cmd("/eventdev/rxa_stats",
3551 "Returns Rx adapter stats. Parameter: rxa_id");
3553 rte_telemetry_register_cmd("/eventdev/rxa_stats_reset",
3554 handle_rxa_stats_reset,
3555 "Reset Rx adapter stats. Parameter: rxa_id");
3557 rte_telemetry_register_cmd("/eventdev/rxa_queue_conf",
3558 handle_rxa_get_queue_conf,
3559 "Returns Rx queue config. Parameters: rxa_id, dev_id, queue_id");
3561 rte_telemetry_register_cmd("/eventdev/rxa_queue_stats",
3562 handle_rxa_get_queue_stats,
3563 "Returns Rx queue stats. Parameters: rxa_id, dev_id, queue_id");
3565 rte_telemetry_register_cmd("/eventdev/rxa_queue_stats_reset",
3566 handle_rxa_queue_stats_reset,
3567 "Reset Rx queue stats. Parameters: rxa_id, dev_id, queue_id");
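/*
 * These endpoints can be exercised with the usertools/dpdk-telemetry.py
 * client; the argument string is the comma-separated parameter list named
 * above (adapter 0, port 0, queue 0 are illustrative values):
 *
 *   --> /eventdev/rxa_stats,0
 *   --> /eventdev/rxa_queue_stats,0,0,0
 */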