lib/eventdev/rte_event_eth_rx_adapter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation.
3  * All rights reserved.
4  */
5 #if defined(LINUX)
6 #include <sys/epoll.h>
7 #endif
8 #include <unistd.h>
9
10 #include <rte_cycles.h>
11 #include <rte_common.h>
12 #include <rte_dev.h>
13 #include <rte_errno.h>
14 #include <ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_service_component.h>
18 #include <rte_thash.h>
19 #include <rte_interrupts.h>
20 #include <rte_mbuf_dyn.h>
21 #include <rte_telemetry.h>
22
23 #include "rte_eventdev.h"
24 #include "eventdev_pmd.h"
25 #include "rte_eventdev_trace.h"
26 #include "rte_event_eth_rx_adapter.h"
27
28 #define BATCH_SIZE              32
29 #define BLOCK_CNT_THRESHOLD     10
30 #define ETH_EVENT_BUFFER_SIZE   (6*BATCH_SIZE)
31 #define MAX_VECTOR_SIZE         1024
32 #define MIN_VECTOR_SIZE         4
33 #define MAX_VECTOR_NS           1E9
34 #define MIN_VECTOR_NS           1E5
35
36 #define ETH_RX_ADAPTER_SERVICE_NAME_LEN 32
37 #define ETH_RX_ADAPTER_MEM_NAME_LEN     32
38
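/* Length in bytes of the software RSS (Toeplitz) hash key; the key is
 * assumed to be stored byte-swapped in rss_key_be for use with
 * rte_softrss_be().
 */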
39 #define RSS_KEY_SIZE    40
40 /* value written to intr thread pipe to signal thread exit */
41 #define ETH_BRIDGE_INTR_THREAD_EXIT     1
42 /* Sentinel value for an uninitialized file descriptor */
43 #define INIT_FD         -1
44
45 #define RXA_ADAPTER_ARRAY "rte_event_eth_rx_adapter_array"
46
47 /*
48  * Used to store port and queue ID of interrupting Rx queue
49  */
50 union queue_data {
51         RTE_STD_C11
52         void *ptr;
53         struct {
54                 uint16_t port;
55                 uint16_t queue;
56         };
57 };
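/*
 * Illustration: the 32-bit <port, queue> pair is passed around as a
 * pointer-sized value, so it can travel through the epoll user data and
 * the interrupt ring without any allocation, e.g.
 *
 *      union queue_data qd = { .port = port_id, .queue = queue_id };
 *      rte_ring_enqueue(intr_ring, qd.ptr);
 */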
58
59 /*
60  * There is an instance of this struct per polled Rx queue added to the
61  * adapter
62  */
63 struct eth_rx_poll_entry {
64         /* Eth port to poll */
65         uint16_t eth_dev_id;
66         /* Eth rx queue to poll */
67         uint16_t eth_rx_qid;
68 };
69
70 struct eth_rx_vector_data {
71         TAILQ_ENTRY(eth_rx_vector_data) next;
72         uint16_t port;
73         uint16_t queue;
74         uint16_t max_vector_count;
75         uint64_t event;
76         uint64_t ts;
77         uint64_t vector_timeout_ticks;
78         struct rte_mempool *vector_pool;
79         struct rte_event_vector *vector_ev;
80 } __rte_cache_aligned;
81
82 TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
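/* In-flight (partially filled) vectors are kept on the adapter's
 * vector_list; rxa_create_event_vector() appends mbufs until
 * max_vector_count is reached and rxa_service_func() flushes a vector
 * once vector_timeout_ticks have elapsed since its last update.
 */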
83
84 /* One instance per adapter (one per Rx queue if use_queue_event_buf is set) */
85 struct rte_eth_event_enqueue_buffer {
86         /* Count of events in this buffer */
87         uint16_t count;
88         /* Array of events in this buffer */
89         struct rte_event *events;
90         /* size of event buffer */
91         uint16_t events_size;
92         /* Event enqueue happens from head */
93         uint16_t head;
94         /* New packets from rte_eth_rx_burst are enqueued at the tail */
95         uint16_t tail;
96         /* Index just past the last valid event before rollover */
97         uint16_t last;
98         uint16_t last_mask;
99 };
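/* The event buffer behaves as a ring: rxa_buffer_mbufs() produces events
 * at 'tail' and rxa_flush_event_buffer() enqueues them to the event device
 * starting at 'head'. When the tail reaches the end of the array and at
 * least BATCH_SIZE entries have drained from the head, 'last' records the
 * wrap point and the tail restarts at index 0 (see rxa_pkt_buf_available()).
 */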
100
101 struct rte_event_eth_rx_adapter {
102         /* RSS key */
103         uint8_t rss_key_be[RSS_KEY_SIZE];
104         /* Event device identifier */
105         uint8_t eventdev_id;
106         /* Event port identifier */
107         uint8_t event_port_id;
108         /* Flag indicating per rxq event buffer */
109         bool use_queue_event_buf;
110         /* Per ethernet device structure */
111         struct eth_device_info *eth_devices;
112         /* Lock to serialize config updates with service function */
113         rte_spinlock_t rx_lock;
114         /* Max mbufs processed in any service function invocation */
115         uint32_t max_nb_rx;
116         /* Receive queues that need to be polled */
117         struct eth_rx_poll_entry *eth_rx_poll;
118         /* Size of the eth_rx_poll array */
119         uint16_t num_rx_polled;
120         /* Weighted round robin schedule */
121         uint32_t *wrr_sched;
122         /* wrr_sched[] size */
123         uint32_t wrr_len;
124         /* Next entry in wrr[] to begin polling */
125         uint32_t wrr_pos;
126         /* Event burst buffer */
127         struct rte_eth_event_enqueue_buffer event_enqueue_buffer;
128         /* Vector enable flag */
129         uint8_t ena_vector;
130         /* Timestamp of previous vector expiry list traversal */
131         uint64_t prev_expiry_ts;
132         /* Minimum ticks to wait before traversing expiry list */
133         uint64_t vector_tmo_ticks;
134         /* vector list */
135         struct eth_rx_vector_data_list vector_list;
136         /* Per adapter stats */
137         struct rte_event_eth_rx_adapter_stats stats;
138         /* Block count, counts up to BLOCK_CNT_THRESHOLD */
139         uint16_t enq_block_count;
140         /* Block start ts */
141         uint64_t rx_enq_block_start_ts;
142         /* epoll fd used to wait for Rx interrupts */
143         int epd;
144         /* Number of interrupt driven Rx queues */
145         uint32_t num_rx_intr;
146         /* Used to send <dev id, queue id> of interrupting Rx queues from
147          * the interrupt thread to the Rx thread
148          */
149         struct rte_ring *intr_ring;
150         /* Rx Queue data (dev id, queue id) for the last non-empty
151          * queue polled
152          */
153         union queue_data qd;
154         /* queue_data is valid */
155         int qd_valid;
156         /* Interrupt ring lock, synchronizes Rx thread
157          * and interrupt thread
158          */
159         rte_spinlock_t intr_ring_lock;
160         /* Event array passed to rte_epoll_wait */
161         struct rte_epoll_event *epoll_events;
162         /* Count of interrupt vectors in use */
163         uint32_t num_intr_vec;
164         /* Thread blocked on Rx interrupts */
165         pthread_t rx_intr_thread;
166         /* Configuration callback for rte_service configuration */
167         rte_event_eth_rx_adapter_conf_cb conf_cb;
168         /* Configuration callback argument */
169         void *conf_arg;
170         /* Set if the default conf callback is being used */
171         int default_cb_arg;
172         /* Service initialization state */
173         uint8_t service_inited;
174         /* Total count of Rx queues in adapter */
175         uint32_t nb_queues;
176         /* Memory allocation name */
177         char mem_name[ETH_RX_ADAPTER_MEM_NAME_LEN];
178         /* Socket identifier cached from eventdev */
179         int socket_id;
180         /* Per adapter EAL service */
181         uint32_t service_id;
182         /* Adapter started flag */
183         uint8_t rxa_started;
184         /* Adapter ID */
185         uint8_t id;
186 } __rte_cache_aligned;
187
188 /* Per eth device */
189 struct eth_device_info {
190         struct rte_eth_dev *dev;
191         struct eth_rx_queue_info *rx_queue;
192         /* Rx callback */
193         rte_event_eth_rx_adapter_cb_fn cb_fn;
194         /* Rx callback argument */
195         void *cb_arg;
196         /* Set if ethdev->eventdev packet transfer uses a
197          * hardware mechanism
198          */
199         uint8_t internal_event_port;
200         /* Set if the adapter is processing Rx queues for
201          * this eth device and packet processing has been
202          * started; this lets the code know whether the PMD
203          * rx_adapter_stop callback needs to be invoked
204          */
205         uint8_t dev_rx_started;
206         /* Number of queues added for this device */
207         uint16_t nb_dev_queues;
208         /* Number of poll based queues
209          * If nb_rx_poll > 0, the start callback will
210          * be invoked if not already invoked
211          */
212         uint16_t nb_rx_poll;
213         /* Number of interrupt based queues
214          * If nb_rx_intr > 0, the start callback will
215          * be invoked if not already invoked.
216          */
217         uint16_t nb_rx_intr;
218         /* Number of queues that use the shared interrupt */
219         uint16_t nb_shared_intr;
220         /* sum(wrr(q)) for all queues within the device;
221          * useful when deleting all device queues
222          */
223         uint32_t wrr_len;
224         /* Interrupt-based queue index to start polling from; this is
225          * used if the number of shared interrupts is non-zero
226          */
227         uint16_t next_q_idx;
228         /* Intr based queue indices */
229         uint16_t *intr_queue;
230         /* Device generates a per Rx queue interrupt for queue
231          * indices < RTE_MAX_RXTX_INTR_VEC_ID - 1
232          */
233         int multi_intr_cap;
234         /* shared interrupt enabled */
235         int shared_intr_enabled;
236 };
237
238 /* Per Rx queue */
239 struct eth_rx_queue_info {
240         int queue_enabled;      /* True if added */
241         int intr_enabled;
242         uint8_t ena_vector;
243         uint16_t wt;            /* Polling weight */
244         uint32_t flow_id_mask;  /* Set to ~0 if app provides flow id else 0 */
245         uint64_t event;
246         struct eth_rx_vector_data vector_data;
247         struct rte_eth_event_enqueue_buffer *event_buf;
248 };
249
250 static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
251
252 /* Enable dynamic timestamp field in mbuf */
253 static uint64_t event_eth_rx_timestamp_dynflag;
254 static int event_eth_rx_timestamp_dynfield_offset = -1;
255
256 static inline rte_mbuf_timestamp_t *
257 rxa_timestamp_dynfield(struct rte_mbuf *mbuf)
258 {
259         return RTE_MBUF_DYNFIELD(mbuf,
260                 event_eth_rx_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
261 }
262
263 static inline int
264 rxa_validate_id(uint8_t id)
265 {
266         return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
267 }
268
269 static inline struct rte_eth_event_enqueue_buffer *
270 rxa_event_buf_get(struct rte_event_eth_rx_adapter *rx_adapter,
271                   uint16_t eth_dev_id, uint16_t rx_queue_id)
272 {
273         if (rx_adapter->use_queue_event_buf) {
274                 struct eth_device_info *dev_info =
275                         &rx_adapter->eth_devices[eth_dev_id];
276                 return dev_info->rx_queue[rx_queue_id].event_buf;
277         } else
278                 return &rx_adapter->event_enqueue_buffer;
279 }
280
281 #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
282         if (!rxa_validate_id(id)) { \
283                 RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
284                 return retval; \
285         } \
286 } while (0)
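/* Typical use at API entry points, e.g.
 *      RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
 */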
287
288 static inline int
289 rxa_sw_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
290 {
291         return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
292 }
293
294 /* Greatest common divisor */
295 static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
296 {
297         uint16_t r = a % b;
298
299         return r ? rxa_gcd_u16(b, r) : b;
300 }
301
302 /* Returns the next queue in the polling sequence
303  *
304  * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
305  */
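/* Worked example (illustrative): with two polled queues of weights
 * {Q0 = 2, Q1 = 1}, max_wt = 2 and gcd = 1, successive calls return
 * Q0, Q0, Q1, which repeats and matches the 2:1 weighting.
 */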
306 static int
307 rxa_wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
308          unsigned int n, int *cw,
309          struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
310          uint16_t gcd, int prev)
311 {
312         int i = prev;
313         uint16_t w;
314
315         while (1) {
316                 uint16_t q;
317                 uint16_t d;
318
319                 i = (i + 1) % n;
320                 if (i == 0) {
321                         *cw = *cw - gcd;
322                         if (*cw <= 0)
323                                 *cw = max_wt;
324                 }
325
326                 q = eth_rx_poll[i].eth_rx_qid;
327                 d = eth_rx_poll[i].eth_dev_id;
328                 w = rx_adapter->eth_devices[d].rx_queue[q].wt;
329
330                 if ((int)w >= *cw)
331                         return i;
332         }
333 }
334
335 static inline int
336 rxa_shared_intr(struct eth_device_info *dev_info,
337         int rx_queue_id)
338 {
339         int multi_intr_cap;
340
341         if (dev_info->dev->intr_handle == NULL)
342                 return 0;
343
344         multi_intr_cap = rte_intr_cap_multiple(dev_info->dev->intr_handle);
345         return !multi_intr_cap ||
346                 rx_queue_id >= RTE_MAX_RXTX_INTR_VEC_ID - 1;
347 }
348
349 static inline int
350 rxa_intr_queue(struct eth_device_info *dev_info,
351         int rx_queue_id)
352 {
353         struct eth_rx_queue_info *queue_info;
354
355         queue_info = &dev_info->rx_queue[rx_queue_id];
356         return dev_info->rx_queue &&
357                 !dev_info->internal_event_port &&
358                 queue_info->queue_enabled && queue_info->wt == 0;
359 }
360
361 static inline int
362 rxa_polled_queue(struct eth_device_info *dev_info,
363         int rx_queue_id)
364 {
365         struct eth_rx_queue_info *queue_info;
366
367         queue_info = &dev_info->rx_queue[rx_queue_id];
368         return !dev_info->internal_event_port &&
369                 dev_info->rx_queue &&
370                 queue_info->queue_enabled && queue_info->wt != 0;
371 }
372
373 /* Calculate change in number of interrupt vectors after an Rx queue is added/deleted */
374 static int
375 rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
376 {
377         uint16_t i;
378         int n, s;
379         uint16_t nbq;
380
381         nbq = dev_info->dev->data->nb_rx_queues;
382         n = 0; /* non shared count */
383         s = 0; /* shared count */
384
385         if (rx_queue_id == -1) {
386                 for (i = 0; i < nbq; i++) {
387                         if (!rxa_shared_intr(dev_info, i))
388                                 n += add ? !rxa_intr_queue(dev_info, i) :
389                                         rxa_intr_queue(dev_info, i);
390                         else
391                                 s += add ? !rxa_intr_queue(dev_info, i) :
392                                         rxa_intr_queue(dev_info, i);
393                 }
394
395                 if (s > 0) {
396                         if ((add && dev_info->nb_shared_intr == 0) ||
397                                 (!add && dev_info->nb_shared_intr))
398                                 n += 1;
399                 }
400         } else {
401                 if (!rxa_shared_intr(dev_info, rx_queue_id))
402                         n = add ? !rxa_intr_queue(dev_info, rx_queue_id) :
403                                 rxa_intr_queue(dev_info, rx_queue_id);
404                 else
405                         n = add ? !dev_info->nb_shared_intr :
406                                 dev_info->nb_shared_intr == 1;
407         }
408
409         return add ? n : -n;
410 }
411
412 /* Calculate nb_rx_intr after deleting interrupt mode rx queues
413  */
414 static void
415 rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
416                         struct eth_device_info *dev_info,
417                         int rx_queue_id,
418                         uint32_t *nb_rx_intr)
419 {
420         uint32_t intr_diff;
421
422         if (rx_queue_id == -1)
423                 intr_diff = dev_info->nb_rx_intr;
424         else
425                 intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
426
427         *nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
428 }
429
430 /* Calculate nb_rx_* after adding interrupt mode rx queues; the newly added
431  * interrupt queues could currently be poll mode Rx queues
432  */
433 static void
434 rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
435                         struct eth_device_info *dev_info,
436                         int rx_queue_id,
437                         uint32_t *nb_rx_poll,
438                         uint32_t *nb_rx_intr,
439                         uint32_t *nb_wrr)
440 {
441         uint32_t intr_diff;
442         uint32_t poll_diff;
443         uint32_t wrr_len_diff;
444
445         if (rx_queue_id == -1) {
446                 intr_diff = dev_info->dev->data->nb_rx_queues -
447                                                 dev_info->nb_rx_intr;
448                 poll_diff = dev_info->nb_rx_poll;
449                 wrr_len_diff = dev_info->wrr_len;
450         } else {
451                 intr_diff = !rxa_intr_queue(dev_info, rx_queue_id);
452                 poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
453                 wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
454                                         0;
455         }
456
457         *nb_rx_intr = rx_adapter->num_rx_intr + intr_diff;
458         *nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
459         *nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
460 }
461
462 /* Calculate size of the eth_rx_poll and wrr_sched arrays
463  * after deleting poll mode rx queues
464  */
465 static void
466 rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
467                         struct eth_device_info *dev_info,
468                         int rx_queue_id,
469                         uint32_t *nb_rx_poll,
470                         uint32_t *nb_wrr)
471 {
472         uint32_t poll_diff;
473         uint32_t wrr_len_diff;
474
475         if (rx_queue_id == -1) {
476                 poll_diff = dev_info->nb_rx_poll;
477                 wrr_len_diff = dev_info->wrr_len;
478         } else {
479                 poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
480                 wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
481                                         0;
482         }
483
484         *nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
485         *nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
486 }
487
488 /* Calculate nb_rx_* after adding poll mode rx queues
489  */
490 static void
491 rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
492                         struct eth_device_info *dev_info,
493                         int rx_queue_id,
494                         uint16_t wt,
495                         uint32_t *nb_rx_poll,
496                         uint32_t *nb_rx_intr,
497                         uint32_t *nb_wrr)
498 {
499         uint32_t intr_diff;
500         uint32_t poll_diff;
501         uint32_t wrr_len_diff;
502
503         if (rx_queue_id == -1) {
504                 intr_diff = dev_info->nb_rx_intr;
505                 poll_diff = dev_info->dev->data->nb_rx_queues -
506                                                 dev_info->nb_rx_poll;
507                 wrr_len_diff = wt*dev_info->dev->data->nb_rx_queues
508                                 - dev_info->wrr_len;
509         } else {
510                 intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
511                 poll_diff = !rxa_polled_queue(dev_info, rx_queue_id);
512                 wrr_len_diff = rxa_polled_queue(dev_info, rx_queue_id) ?
513                                 wt - dev_info->rx_queue[rx_queue_id].wt :
514                                 wt;
515         }
516
517         *nb_rx_poll = rx_adapter->num_rx_polled + poll_diff;
518         *nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
519         *nb_wrr = rx_adapter->wrr_len + wrr_len_diff;
520 }
521
522 /* Calculate nb_rx_* after adding rx_queue_id */
523 static void
524 rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
525                 struct eth_device_info *dev_info,
526                 int rx_queue_id,
527                 uint16_t wt,
528                 uint32_t *nb_rx_poll,
529                 uint32_t *nb_rx_intr,
530                 uint32_t *nb_wrr)
531 {
532         if (wt != 0)
533                 rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
534                                         wt, nb_rx_poll, nb_rx_intr, nb_wrr);
535         else
536                 rxa_calc_nb_post_add_intr(rx_adapter, dev_info, rx_queue_id,
537                                         nb_rx_poll, nb_rx_intr, nb_wrr);
538 }
539
540 /* Calculate nb_rx_* after deleting rx_queue_id */
541 static void
542 rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
543                 struct eth_device_info *dev_info,
544                 int rx_queue_id,
545                 uint32_t *nb_rx_poll,
546                 uint32_t *nb_rx_intr,
547                 uint32_t *nb_wrr)
548 {
549         rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
550                                 nb_wrr);
551         rxa_calc_nb_post_intr_del(rx_adapter, dev_info, rx_queue_id,
552                                 nb_rx_intr);
553 }
554
555 /*
556  * Allocate the rx_poll array
557  */
558 static struct eth_rx_poll_entry *
559 rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
560         uint32_t num_rx_polled)
561 {
562         size_t len;
563
564         len  = RTE_ALIGN(num_rx_polled * sizeof(*rx_adapter->eth_rx_poll),
565                                                         RTE_CACHE_LINE_SIZE);
566         return  rte_zmalloc_socket(rx_adapter->mem_name,
567                                 len,
568                                 RTE_CACHE_LINE_SIZE,
569                                 rx_adapter->socket_id);
570 }
571
572 /*
573  * Allocate the WRR array
574  */
575 static uint32_t *
576 rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
577 {
578         size_t len;
579
580         len = RTE_ALIGN(nb_wrr * sizeof(*rx_adapter->wrr_sched),
581                         RTE_CACHE_LINE_SIZE);
582         return  rte_zmalloc_socket(rx_adapter->mem_name,
583                                 len,
584                                 RTE_CACHE_LINE_SIZE,
585                                 rx_adapter->socket_id);
586 }
587
588 static int
589 rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
590                 uint32_t nb_poll,
591                 uint32_t nb_wrr,
592                 struct eth_rx_poll_entry **rx_poll,
593                 uint32_t **wrr_sched)
594 {
595
596         if (nb_poll == 0) {
597                 *rx_poll = NULL;
598                 *wrr_sched = NULL;
599                 return 0;
600         }
601
602         *rx_poll = rxa_alloc_poll(rx_adapter, nb_poll);
603         if (*rx_poll == NULL) {
604                 *wrr_sched = NULL;
605                 return -ENOMEM;
606         }
607
608         *wrr_sched = rxa_alloc_wrr(rx_adapter, nb_wrr);
609         if (*wrr_sched == NULL) {
610                 rte_free(*rx_poll);
611                 return -ENOMEM;
612         }
613         return 0;
614 }
615
616 /* Precalculate WRR polling sequence for all queues in rx_adapter */
617 static void
618 rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter,
619                 struct eth_rx_poll_entry *rx_poll,
620                 uint32_t *rx_wrr)
621 {
622         uint16_t d;
623         uint16_t q;
624         unsigned int i;
625         int prev = -1;
626         int cw = -1;
627
628         /* Initialize variables for calculation of wrr schedule */
629         uint16_t max_wrr_pos = 0;
630         unsigned int poll_q = 0;
631         uint16_t max_wt = 0;
632         uint16_t gcd = 0;
633
634         if (rx_poll == NULL)
635                 return;
636
637         /* Generate the array of all queues to poll; the size of this
638          * array is poll_q
639          */
640         RTE_ETH_FOREACH_DEV(d) {
641                 uint16_t nb_rx_queues;
642                 struct eth_device_info *dev_info =
643                                 &rx_adapter->eth_devices[d];
644                 nb_rx_queues = dev_info->dev->data->nb_rx_queues;
645                 if (dev_info->rx_queue == NULL)
646                         continue;
647                 if (dev_info->internal_event_port)
648                         continue;
649                 dev_info->wrr_len = 0;
650                 for (q = 0; q < nb_rx_queues; q++) {
651                         struct eth_rx_queue_info *queue_info =
652                                 &dev_info->rx_queue[q];
653                         uint16_t wt;
654
655                         if (!rxa_polled_queue(dev_info, q))
656                                 continue;
657                         wt = queue_info->wt;
658                         rx_poll[poll_q].eth_dev_id = d;
659                         rx_poll[poll_q].eth_rx_qid = q;
660                         max_wrr_pos += wt;
661                         dev_info->wrr_len += wt;
662                         max_wt = RTE_MAX(max_wt, wt);
663                         gcd = (gcd) ? rxa_gcd_u16(gcd, wt) : wt;
664                         poll_q++;
665                 }
666         }
667
668         /* Generate polling sequence based on weights */
669         prev = -1;
670         cw = -1;
671         for (i = 0; i < max_wrr_pos; i++) {
672                 rx_wrr[i] = rxa_wrr_next(rx_adapter, poll_q, &cw,
673                                      rx_poll, max_wt, gcd, prev);
674                 prev = rx_wrr[i];
675         }
676 }
677
678 static inline void
679 rxa_mtoip(struct rte_mbuf *m, struct rte_ipv4_hdr **ipv4_hdr,
680         struct rte_ipv6_hdr **ipv6_hdr)
681 {
682         struct rte_ether_hdr *eth_hdr =
683                 rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
684         struct rte_vlan_hdr *vlan_hdr;
685
686         *ipv4_hdr = NULL;
687         *ipv6_hdr = NULL;
688
689         switch (eth_hdr->ether_type) {
690         case RTE_BE16(RTE_ETHER_TYPE_IPV4):
691                 *ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
692                 break;
693
694         case RTE_BE16(RTE_ETHER_TYPE_IPV6):
695                 *ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
696                 break;
697
698         case RTE_BE16(RTE_ETHER_TYPE_VLAN):
699                 vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
700                 switch (vlan_hdr->eth_proto) {
701                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
702                         *ipv4_hdr = (struct rte_ipv4_hdr *)(vlan_hdr + 1);
703                         break;
704                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
705                         *ipv6_hdr = (struct rte_ipv6_hdr *)(vlan_hdr + 1);
706                         break;
707                 default:
708                         break;
709                 }
710                 break;
711
712         default:
713                 break;
714         }
715 }
716
717 /* Calculate software RSS hash over the IPv4/IPv6 source/destination addresses */
718 static inline uint32_t
719 rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
720 {
721         uint32_t input_len;
722         void *tuple;
723         struct rte_ipv4_tuple ipv4_tuple;
724         struct rte_ipv6_tuple ipv6_tuple;
725         struct rte_ipv4_hdr *ipv4_hdr;
726         struct rte_ipv6_hdr *ipv6_hdr;
727
728         rxa_mtoip(m, &ipv4_hdr, &ipv6_hdr);
729
730         if (ipv4_hdr) {
731                 ipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);
732                 ipv4_tuple.dst_addr = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
733                 tuple = &ipv4_tuple;
734                 input_len = RTE_THASH_V4_L3_LEN;
735         } else if (ipv6_hdr) {
736                 rte_thash_load_v6_addrs(ipv6_hdr,
737                                         (union rte_thash_tuple *)&ipv6_tuple);
738                 tuple = &ipv6_tuple;
739                 input_len = RTE_THASH_V6_L3_LEN;
740         } else
741                 return 0;
742
743         return rte_softrss_be(tuple, input_len, rss_key_be);
744 }
745
746 static inline int
747 rxa_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
748 {
749         return !!rx_adapter->enq_block_count;
750 }
751
752 static inline void
753 rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
754 {
755         if (rx_adapter->rx_enq_block_start_ts)
756                 return;
757
758         rx_adapter->enq_block_count++;
759         if (rx_adapter->enq_block_count < BLOCK_CNT_THRESHOLD)
760                 return;
761
762         rx_adapter->rx_enq_block_start_ts = rte_get_tsc_cycles();
763 }
764
765 static inline void
766 rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
767                     struct rte_event_eth_rx_adapter_stats *stats)
768 {
769         if (unlikely(!stats->rx_enq_start_ts))
770                 stats->rx_enq_start_ts = rte_get_tsc_cycles();
771
772         if (likely(!rxa_enq_blocked(rx_adapter)))
773                 return;
774
775         rx_adapter->enq_block_count = 0;
776         if (rx_adapter->rx_enq_block_start_ts) {
777                 stats->rx_enq_end_ts = rte_get_tsc_cycles();
778                 stats->rx_enq_block_cycles += stats->rx_enq_end_ts -
779                     rx_adapter->rx_enq_block_start_ts;
780                 rx_adapter->rx_enq_block_start_ts = 0;
781         }
782 }
783
784 /* Enqueue buffered events to event device */
785 static inline uint16_t
786 rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
787                        struct rte_eth_event_enqueue_buffer *buf)
788 {
789         struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
790         uint16_t count = buf->last ? buf->last - buf->head : buf->count;
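        /* If the buffer has wrapped (last != 0), only the region
         * [head, last) is enqueued here; the wrapped region [0, tail)
         * is enqueued further below once this enqueue fully succeeds.
         */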
791
792         if (!count)
793                 return 0;
794
795         uint16_t n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
796                                         rx_adapter->event_port_id,
797                                         &buf->events[buf->head],
798                                         count);
799         if (n != count)
800                 stats->rx_enq_retry++;
801
802         buf->head += n;
803
804         if (buf->last && n == count) {
805                 uint16_t n1;
806
807                 n1 = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
808                                         rx_adapter->event_port_id,
809                                         &buf->events[0],
810                                         buf->tail);
811
812                 if (n1 != buf->tail)
813                         stats->rx_enq_retry++;
814
815                 buf->last = 0;
816                 buf->head = n1;
817                 buf->last_mask = 0;
818                 n += n1;
819         }
820
821         n ? rxa_enq_block_end_ts(rx_adapter, stats) :
822                 rxa_enq_block_start_ts(rx_adapter);
823
824         buf->count -= n;
825         stats->rx_enq_count += n;
826
827         return n;
828 }
829
830 static inline void
831 rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
832                 struct eth_rx_vector_data *vec)
833 {
834         vec->vector_ev->nb_elem = 0;
835         vec->vector_ev->port = vec->port;
836         vec->vector_ev->queue = vec->queue;
837         vec->vector_ev->attr_valid = true;
838         TAILQ_INSERT_TAIL(&rx_adapter->vector_list, vec, next);
839 }
840
841 static inline uint16_t
842 rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
843                         struct eth_rx_queue_info *queue_info,
844                         struct rte_eth_event_enqueue_buffer *buf,
845                         struct rte_mbuf **mbufs, uint16_t num)
846 {
847         struct rte_event *ev = &buf->events[buf->count];
848         struct eth_rx_vector_data *vec;
849         uint16_t filled, space, sz;
850
851         filled = 0;
852         vec = &queue_info->vector_data;
853
854         if (vec->vector_ev == NULL) {
855                 if (rte_mempool_get(vec->vector_pool,
856                                     (void **)&vec->vector_ev) < 0) {
857                         rte_pktmbuf_free_bulk(mbufs, num);
858                         return 0;
859                 }
860                 rxa_init_vector(rx_adapter, vec);
861         }
862         while (num) {
863                 if (vec->vector_ev->nb_elem == vec->max_vector_count) {
864                         /* Event ready. */
865                         ev->event = vec->event;
866                         ev->vec = vec->vector_ev;
867                         ev++;
868                         filled++;
869                         vec->vector_ev = NULL;
870                         TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
871                         if (rte_mempool_get(vec->vector_pool,
872                                             (void **)&vec->vector_ev) < 0) {
873                                 rte_pktmbuf_free_bulk(mbufs, num);
874                                 return 0;
875                         }
876                         rxa_init_vector(rx_adapter, vec);
877                 }
878
879                 space = vec->max_vector_count - vec->vector_ev->nb_elem;
880                 sz = num > space ? space : num;
881                 memcpy(vec->vector_ev->mbufs + vec->vector_ev->nb_elem, mbufs,
882                        sizeof(void *) * sz);
883                 vec->vector_ev->nb_elem += sz;
884                 num -= sz;
885                 mbufs += sz;
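                /* Refresh the aggregation timestamp; rxa_service_func()
                 * uses it to expire partially filled vectors once
                 * vector_timeout_ticks have elapsed.
                 */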
886                 vec->ts = rte_rdtsc();
887         }
888
889         if (vec->vector_ev->nb_elem == vec->max_vector_count) {
890                 ev->event = vec->event;
891                 ev->vec = vec->vector_ev;
892                 ev++;
893                 filled++;
894                 vec->vector_ev = NULL;
895                 TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
896         }
897
898         return filled;
899 }
900
901 static inline void
902 rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
903                 uint16_t eth_dev_id,
904                 uint16_t rx_queue_id,
905                 struct rte_mbuf **mbufs,
906                 uint16_t num,
907                 struct rte_eth_event_enqueue_buffer *buf)
908 {
909         uint32_t i;
910         struct eth_device_info *dev_info =
911                                         &rx_adapter->eth_devices[eth_dev_id];
912         struct eth_rx_queue_info *eth_rx_queue_info =
913                                         &dev_info->rx_queue[rx_queue_id];
914         uint16_t new_tail = buf->tail;
915         uint64_t event = eth_rx_queue_info->event;
916         uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
917         struct rte_mbuf *m = mbufs[0];
918         uint32_t rss_mask;
919         uint32_t rss;
920         int do_rss;
921         uint16_t nb_cb;
922         uint16_t dropped;
923         uint64_t ts, ts_mask;
924
925         if (!eth_rx_queue_info->ena_vector) {
926                 ts = m->ol_flags & event_eth_rx_timestamp_dynflag ?
927                                                 0 : rte_get_tsc_cycles();
928
929                 /* 0xffff ffff ffff ffff if PKT_RX_TIMESTAMP is set,
930                  * otherwise 0
931                  */
932                 ts_mask = (uint64_t)(!(m->ol_flags &
933                                        event_eth_rx_timestamp_dynflag)) - 1ULL;
934
935                 /* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */
936                 rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);
937                 do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
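                /* Compute a software RSS hash only when the NIC did not
                 * provide one and the application did not supply a flow ID.
                 */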
938                 for (i = 0; i < num; i++) {
939                         struct rte_event *ev;
940
941                         m = mbufs[i];
942                         *rxa_timestamp_dynfield(m) = ts |
943                                         (*rxa_timestamp_dynfield(m) & ts_mask);
944
945                         ev = &buf->events[new_tail];
946
947                         rss = do_rss ? rxa_do_softrss(m, rx_adapter->rss_key_be)
948                                      : m->hash.rss;
949                         ev->event = event;
950                         ev->flow_id = (rss & ~flow_id_mask) |
951                                       (ev->flow_id & flow_id_mask);
952                         ev->mbuf = m;
953                         new_tail++;
954                 }
955         } else {
956                 num = rxa_create_event_vector(rx_adapter, eth_rx_queue_info,
957                                               buf, mbufs, num);
958         }
959
960         if (num && dev_info->cb_fn) {
961
962                 dropped = 0;
963                 nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id,
964                                        buf->last |
965                                        (buf->events_size & ~buf->last_mask),
966                                        buf->count >= BATCH_SIZE ?
967                                                 buf->count - BATCH_SIZE : 0,
968                                        &buf->events[buf->tail],
969                                        num,
970                                        dev_info->cb_arg,
971                                        &dropped);
972                 if (unlikely(nb_cb > num))
973                         RTE_EDEV_LOG_ERR("Rx CB returned %d (> %d) events",
974                                 nb_cb, num);
975                 else
976                         num = nb_cb;
977                 if (dropped)
978                         rx_adapter->stats.rx_dropped += dropped;
979         }
980
981         buf->count += num;
982         buf->tail += num;
983 }
984
985 static inline bool
986 rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf)
987 {
988         uint32_t nb_req = buf->tail + BATCH_SIZE;
989
990         if (!buf->last) {
991                 if (nb_req <= buf->events_size)
992                         return true;
993
994                 if (buf->head >= BATCH_SIZE) {
995                         buf->last_mask = ~0;
996                         buf->last = buf->tail;
997                         buf->tail = 0;
998                         return true;
999                 }
1000         }
1001
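        /* The buffer has wrapped; new events are written into [tail, head),
         * so there is room only if the request fits below the head.
         */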
1002         return nb_req <= buf->head;
1003 }
1004
1005 /* Enqueue packets from  <port, q>  to event buffer */
1006 static inline uint32_t
1007 rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
1008         uint16_t port_id,
1009         uint16_t queue_id,
1010         uint32_t rx_count,
1011         uint32_t max_rx,
1012         int *rxq_empty,
1013         struct rte_eth_event_enqueue_buffer *buf)
1014 {
1015         struct rte_mbuf *mbufs[BATCH_SIZE];
1016         struct rte_event_eth_rx_adapter_stats *stats =
1017                                         &rx_adapter->stats;
1018         uint16_t n;
1019         uint32_t nb_rx = 0;
1020
1021         if (rxq_empty)
1022                 *rxq_empty = 0;
1023         /* Don't do a batch dequeue from the rx queue if there isn't
1024          * enough space in the enqueue buffer.
1025          */
1026         while (rxa_pkt_buf_available(buf)) {
1027                 if (buf->count >= BATCH_SIZE)
1028                         rxa_flush_event_buffer(rx_adapter, buf);
1029
1030                 stats->rx_poll_count++;
1031                 n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
1032                 if (unlikely(!n)) {
1033                         if (rxq_empty)
1034                                 *rxq_empty = 1;
1035                         break;
1036                 }
1037                 rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf);
1038                 nb_rx += n;
1039                 if (rx_count + nb_rx > max_rx)
1040                         break;
1041         }
1042
1043         if (buf->count > 0)
1044                 rxa_flush_event_buffer(rx_adapter, buf);
1045
1046         return nb_rx;
1047 }
1048
1049 static inline void
1050 rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
1051                 void *data)
1052 {
1053         uint16_t port_id;
1054         uint16_t queue;
1055         int err;
1056         union queue_data qd;
1057         struct eth_device_info *dev_info;
1058         struct eth_rx_queue_info *queue_info;
1059         int *intr_enabled;
1060
1061         qd.ptr = data;
1062         port_id = qd.port;
1063         queue = qd.queue;
1064
1065         dev_info = &rx_adapter->eth_devices[port_id];
1066         queue_info = &dev_info->rx_queue[queue];
1067         rte_spinlock_lock(&rx_adapter->intr_ring_lock);
1068         if (rxa_shared_intr(dev_info, queue))
1069                 intr_enabled = &dev_info->shared_intr_enabled;
1070         else
1071                 intr_enabled = &queue_info->intr_enabled;
1072
1073         if (*intr_enabled) {
1074                 *intr_enabled = 0;
1075                 err = rte_ring_enqueue(rx_adapter->intr_ring, data);
1076                 /* Entry should always be available.
1077                  * The ring size equals the maximum number of interrupt
1078                  * vectors supported (an interrupt vector is shared in
1079                  * case of shared interrupts)
1080                  */
1081                 if (err)
1082                         RTE_EDEV_LOG_ERR("Failed to enqueue interrupt"
1083                                 " to ring: %s", strerror(-err));
1084                 else
1085                         rte_eth_dev_rx_intr_disable(port_id, queue);
1086         }
1087         rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
1088 }
1089
1090 static int
1091 rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
1092                         uint32_t num_intr_vec)
1093 {
1094         if (rx_adapter->num_intr_vec + num_intr_vec >
1095                                 RTE_EVENT_ETH_INTR_RING_SIZE) {
1096                 RTE_EDEV_LOG_ERR("Exceeded intr ring slots current"
1097                 " %d needed %d limit %d", rx_adapter->num_intr_vec,
1098                 num_intr_vec, RTE_EVENT_ETH_INTR_RING_SIZE);
1099                 return -ENOSPC;
1100         }
1101
1102         return 0;
1103 }
1104
1105 /* Delete entries for (dev, queue) from the interrupt ring */
1106 static void
1107 rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
1108                         struct eth_device_info *dev_info,
1109                         uint16_t rx_queue_id)
1110 {
1111         int i, n;
1112         union queue_data qd;
1113
1114         rte_spinlock_lock(&rx_adapter->intr_ring_lock);
1115
1116         n = rte_ring_count(rx_adapter->intr_ring);
1117         for (i = 0; i < n; i++) {
1118                 rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
1119                 if (!rxa_shared_intr(dev_info, rx_queue_id)) {
1120                         if (qd.port == dev_info->dev->data->port_id &&
1121                                 qd.queue == rx_queue_id)
1122                                 continue;
1123                 } else {
1124                         if (qd.port == dev_info->dev->data->port_id)
1125                                 continue;
1126                 }
1127                 rte_ring_enqueue(rx_adapter->intr_ring, qd.ptr);
1128         }
1129
1130         rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
1131 }
1132
1133 /* pthread callback handling interrupt mode receive queues
1134  * After receiving an Rx interrupt, it enqueues the port id and queue id of the
1135  * interrupting queue to the adapter's ring buffer for interrupt events.
1136  * These events are picked up by rxa_intr_ring_dequeue() which is invoked from
1137  * the adapter service function.
1138  */
1139 static void *
1140 rxa_intr_thread(void *arg)
1141 {
1142         struct rte_event_eth_rx_adapter *rx_adapter = arg;
1143         struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
1144         int n, i;
1145
1146         while (1) {
1147                 n = rte_epoll_wait(rx_adapter->epd, epoll_events,
1148                                 RTE_EVENT_ETH_INTR_RING_SIZE, -1);
1149                 if (unlikely(n < 0))
1150                         RTE_EDEV_LOG_ERR("rte_epoll_wait returned error %d",
1151                                         n);
1152                 for (i = 0; i < n; i++) {
1153                         rxa_intr_ring_enqueue(rx_adapter,
1154                                         epoll_events[i].epdata.data);
1155                 }
1156         }
1157
1158         return NULL;
1159 }
1160
1161 /* Dequeue <port, q> from interrupt ring and enqueue received
1162  * mbufs to eventdev
1163  */
1164 static inline uint32_t
1165 rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
1166 {
1167         uint32_t n;
1168         uint32_t nb_rx = 0;
1169         int rxq_empty;
1170         struct rte_eth_event_enqueue_buffer *buf;
1171         rte_spinlock_t *ring_lock;
1172         uint8_t max_done = 0;
1173
1174         if (rx_adapter->num_rx_intr == 0)
1175                 return 0;
1176
1177         if (rte_ring_count(rx_adapter->intr_ring) == 0
1178                 && !rx_adapter->qd_valid)
1179                 return 0;
1180
1181         buf = &rx_adapter->event_enqueue_buffer;
1182         ring_lock = &rx_adapter->intr_ring_lock;
1183
1184         if (buf->count >= BATCH_SIZE)
1185                 rxa_flush_event_buffer(rx_adapter, buf);
1186
1187         while (rxa_pkt_buf_available(buf)) {
1188                 struct eth_device_info *dev_info;
1189                 uint16_t port;
1190                 uint16_t queue;
1191                 union queue_data qd  = rx_adapter->qd;
1192                 int err;
1193
1194                 if (!rx_adapter->qd_valid) {
1195                         struct eth_rx_queue_info *queue_info;
1196
1197                         rte_spinlock_lock(ring_lock);
1198                         err = rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
1199                         if (err) {
1200                                 rte_spinlock_unlock(ring_lock);
1201                                 break;
1202                         }
1203
1204                         port = qd.port;
1205                         queue = qd.queue;
1206                         rx_adapter->qd = qd;
1207                         rx_adapter->qd_valid = 1;
1208                         dev_info = &rx_adapter->eth_devices[port];
1209                         if (rxa_shared_intr(dev_info, queue))
1210                                 dev_info->shared_intr_enabled = 1;
1211                         else {
1212                                 queue_info = &dev_info->rx_queue[queue];
1213                                 queue_info->intr_enabled = 1;
1214                         }
1215                         rte_eth_dev_rx_intr_enable(port, queue);
1216                         rte_spinlock_unlock(ring_lock);
1217                 } else {
1218                         port = qd.port;
1219                         queue = qd.queue;
1220
1221                         dev_info = &rx_adapter->eth_devices[port];
1222                 }
1223
1224                 if (rxa_shared_intr(dev_info, queue)) {
1225                         uint16_t i;
1226                         uint16_t nb_queues;
1227
1228                         nb_queues = dev_info->dev->data->nb_rx_queues;
1229                         n = 0;
1230                         for (i = dev_info->next_q_idx; i < nb_queues; i++) {
1231                                 uint8_t enq_buffer_full;
1232
1233                                 if (!rxa_intr_queue(dev_info, i))
1234                                         continue;
1235                                 n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
1236                                         rx_adapter->max_nb_rx,
1237                                         &rxq_empty, buf);
1238                                 nb_rx += n;
1239
1240                                 enq_buffer_full = !rxq_empty && n == 0;
1241                                 max_done = nb_rx > rx_adapter->max_nb_rx;
1242
1243                                 if (enq_buffer_full || max_done) {
1244                                         dev_info->next_q_idx = i;
1245                                         goto done;
1246                                 }
1247                         }
1248
1249                         rx_adapter->qd_valid = 0;
1250
1251                         /* Reinitialize for next interrupt */
1252                         dev_info->next_q_idx = dev_info->multi_intr_cap ?
1253                                                 RTE_MAX_RXTX_INTR_VEC_ID - 1 :
1254                                                 0;
1255                 } else {
1256                         n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
1257                                 rx_adapter->max_nb_rx,
1258                                 &rxq_empty, buf);
1259                         rx_adapter->qd_valid = !rxq_empty;
1260                         nb_rx += n;
1261                         if (nb_rx > rx_adapter->max_nb_rx)
1262                                 break;
1263                 }
1264         }
1265
1266 done:
1267         rx_adapter->stats.rx_intr_packets += nb_rx;
1268         return nb_rx;
1269 }
1270
1271 /*
1272  * Polls receive queues added to the event adapter and enqueues received
1273  * packets to the event device.
1274  *
1275  * The receive code enqueues initially to a temporary buffer; the
1276  * temporary buffer is drained anytime it holds >= BATCH_SIZE packets.
1277  *
1278  * If there isn't space available in the temporary buffer, packets from the
1279  * Rx queue aren't dequeued from the eth device; this back pressures the
1280  * eth device. In virtual device environments this back pressure is relayed
1281  * to the hypervisor's switching layer where adjustments can be made to
1282  * deal with it.
1283  */
1284 static inline uint32_t
1285 rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
1286 {
1287         uint32_t num_queue;
1288         uint32_t nb_rx = 0;
1289         struct rte_eth_event_enqueue_buffer *buf = NULL;
1290         uint32_t wrr_pos;
1291         uint32_t max_nb_rx;
1292
1293         wrr_pos = rx_adapter->wrr_pos;
1294         max_nb_rx = rx_adapter->max_nb_rx;
1295
1296         /* Iterate through a WRR sequence */
1297         for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
1298                 unsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];
1299                 uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
1300                 uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
1301
1302                 buf = rxa_event_buf_get(rx_adapter, d, qid);
1303
1304                 /* Don't do a batch dequeue from the rx queue if there isn't
1305                  * enough space in the enqueue buffer.
1306                  */
1307                 if (buf->count >= BATCH_SIZE)
1308                         rxa_flush_event_buffer(rx_adapter, buf);
1309                 if (!rxa_pkt_buf_available(buf)) {
1310                         if (rx_adapter->use_queue_event_buf)
1311                                 goto poll_next_entry;
1312                         else {
1313                                 rx_adapter->wrr_pos = wrr_pos;
1314                                 return nb_rx;
1315                         }
1316                 }
1317
1318                 nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
1319                                 NULL, buf);
1320                 if (nb_rx > max_nb_rx) {
1321                         rx_adapter->wrr_pos =
1322                                     (wrr_pos + 1) % rx_adapter->wrr_len;
1323                         break;
1324                 }
1325
1326 poll_next_entry:
1327                 if (++wrr_pos == rx_adapter->wrr_len)
1328                         wrr_pos = 0;
1329         }
1330         return nb_rx;
1331 }
1332
1333 static void
1334 rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
1335 {
1336         struct rte_event_eth_rx_adapter *rx_adapter = arg;
1337         struct rte_eth_event_enqueue_buffer *buf = NULL;
1338         struct rte_event *ev;
1339
1340         buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue);
1341
1342         if (buf->count)
1343                 rxa_flush_event_buffer(rx_adapter, buf);
1344
1345         if (vec->vector_ev->nb_elem == 0)
1346                 return;
1347         ev = &buf->events[buf->count];
1348
1349         /* Event ready. */
1350         ev->event = vec->event;
1351         ev->vec = vec->vector_ev;
1352         buf->count++;
1353
1354         vec->vector_ev = NULL;
1355         vec->ts = 0;
1356 }
1357
1358 static int
1359 rxa_service_func(void *args)
1360 {
1361         struct rte_event_eth_rx_adapter *rx_adapter = args;
1362         struct rte_event_eth_rx_adapter_stats *stats;
1363
1364         if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
1365                 return 0;
1366         if (!rx_adapter->rxa_started) {
1367                 rte_spinlock_unlock(&rx_adapter->rx_lock);
1368                 return 0;
1369         }
1370
1371         if (rx_adapter->ena_vector) {
1372                 if ((rte_rdtsc() - rx_adapter->prev_expiry_ts) >=
1373                     rx_adapter->vector_tmo_ticks) {
1374                         struct eth_rx_vector_data *vec;
1375
1376                         TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {
1377                                 uint64_t elapsed_time = rte_rdtsc() - vec->ts;
1378
1379                                 if (elapsed_time >= vec->vector_timeout_ticks) {
1380                                         rxa_vector_expire(vec, rx_adapter);
1381                                         TAILQ_REMOVE(&rx_adapter->vector_list,
1382                                                      vec, next);
1383                                 }
1384                         }
1385                         rx_adapter->prev_expiry_ts = rte_rdtsc();
1386                 }
1387         }
1388
1389         stats = &rx_adapter->stats;
1390         stats->rx_packets += rxa_intr_ring_dequeue(rx_adapter);
1391         stats->rx_packets += rxa_poll(rx_adapter);
1392         rte_spinlock_unlock(&rx_adapter->rx_lock);
1393         return 0;
1394 }
1395
1396 static int
1397 rte_event_eth_rx_adapter_init(void)
1398 {
1399         const char *name = RXA_ADAPTER_ARRAY;
1400         const struct rte_memzone *mz;
1401         unsigned int sz;
1402
1403         sz = sizeof(*event_eth_rx_adapter) *
1404             RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
1405         sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
1406
1407         mz = rte_memzone_lookup(name);
1408         if (mz == NULL) {
1409                 mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
1410                                                  RTE_CACHE_LINE_SIZE);
1411                 if (mz == NULL) {
1412                         RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
1413                                         PRId32, rte_errno);
1414                         return -rte_errno;
1415                 }
1416         }
1417
1418         event_eth_rx_adapter = mz->addr;
1419         return 0;
1420 }
1421
1422 static int
1423 rxa_memzone_lookup(void)
1424 {
1425         const struct rte_memzone *mz;
1426
1427         if (event_eth_rx_adapter == NULL) {
1428                 mz = rte_memzone_lookup(RXA_ADAPTER_ARRAY);
1429                 if (mz == NULL)
1430                         return -ENOMEM;
1431                 event_eth_rx_adapter = mz->addr;
1432         }
1433
1434         return 0;
1435 }
1436
1437 static inline struct rte_event_eth_rx_adapter *
1438 rxa_id_to_adapter(uint8_t id)
1439 {
1440         return event_eth_rx_adapter ?
1441                 event_eth_rx_adapter[id] : NULL;
1442 }
1443
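/* Default adapter configuration callback: stops the event device if it is
 * running, reconfigures it with one additional event port, sets up that
 * port for the adapter, and restarts the device if it was previously
 * started.
 */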
1444 static int
1445 rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
1446                 struct rte_event_eth_rx_adapter_conf *conf, void *arg)
1447 {
1448         int ret;
1449         struct rte_eventdev *dev;
1450         struct rte_event_dev_config dev_conf;
1451         int started;
1452         uint8_t port_id;
1453         struct rte_event_port_conf *port_conf = arg;
1454         struct rte_event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
1455
1456         dev = &rte_eventdevs[rx_adapter->eventdev_id];
1457         dev_conf = dev->data->dev_conf;
1458
1459         started = dev->data->dev_started;
1460         if (started)
1461                 rte_event_dev_stop(dev_id);
1462         port_id = dev_conf.nb_event_ports;
1463         dev_conf.nb_event_ports += 1;
1464         ret = rte_event_dev_configure(dev_id, &dev_conf);
1465         if (ret) {
1466                 RTE_EDEV_LOG_ERR("failed to configure event dev %u\n",
1467                                                 dev_id);
1468                 if (started) {
1469                         if (rte_event_dev_start(dev_id))
1470                                 return -EIO;
1471                 }
1472                 return ret;
1473         }
1474
1475         ret = rte_event_port_setup(dev_id, port_id, port_conf);
1476         if (ret) {
1477                 RTE_EDEV_LOG_ERR("failed to setup event port %u",
1478                                         port_id);
1479                 return ret;
1480         }
1481
1482         conf->event_port_id = port_id;
1483         conf->max_nb_rx = 128;
1484         if (started)
1485                 ret = rte_event_dev_start(dev_id);
1486         rx_adapter->default_cb_arg = 1;
1487         return ret;
1488 }
1489
1490 static int
1491 rxa_epoll_create1(void)
1492 {
1493 #if defined(LINUX)
1494         int fd;
1495         fd = epoll_create1(EPOLL_CLOEXEC);
1496         return fd < 0 ? -errno : fd;
1497 #else /* BSD and other platforms: Rx queue interrupts not supported */
1498         return -ENOTSUP;
1499 #endif
1500 }
1501
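/* Lazily create the epoll fd used to wait for Rx queue interrupts */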
1502 static int
1503 rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
1504 {
1505         if (rx_adapter->epd != INIT_FD)
1506                 return 0;
1507
1508         rx_adapter->epd = rxa_epoll_create1();
1509         if (rx_adapter->epd < 0) {
1510                 int err = rx_adapter->epd;
1511                 rx_adapter->epd = INIT_FD;
1512                 RTE_EDEV_LOG_ERR("epoll_create1() failed, err %d", err);
1513                 return err;
1514         }
1515
1516         return 0;
1517 }
1518
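/*
 * Allocate the interrupt ring and epoll event array and spawn the
 * control thread that waits for Rx interrupts and queues them for the
 * service function. Returns early if the ring already exists; note that
 * the ring name is not adapter specific.
 */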
1519 static int
1520 rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
1521 {
1522         int err;
1523         char thread_name[RTE_MAX_THREAD_NAME_LEN];
1524
1525         if (rx_adapter->intr_ring)
1526                 return 0;
1527
1528         rx_adapter->intr_ring = rte_ring_create("intr_ring",
1529                                         RTE_EVENT_ETH_INTR_RING_SIZE,
1530                                         rte_socket_id(), 0);
1531         if (!rx_adapter->intr_ring)
1532                 return -ENOMEM;
1533
1534         rx_adapter->epoll_events = rte_zmalloc_socket(rx_adapter->mem_name,
1535                                         RTE_EVENT_ETH_INTR_RING_SIZE *
1536                                         sizeof(struct rte_epoll_event),
1537                                         RTE_CACHE_LINE_SIZE,
1538                                         rx_adapter->socket_id);
1539         if (!rx_adapter->epoll_events) {
1540                 err = -ENOMEM;
1541                 goto error;
1542         }
1543
1544         rte_spinlock_init(&rx_adapter->intr_ring_lock);
1545
1546         snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
1547                         "rx-intr-thread-%d", rx_adapter->id);
1548
1549         err = rte_ctrl_thread_create(&rx_adapter->rx_intr_thread, thread_name,
1550                                 NULL, rxa_intr_thread, rx_adapter);
1551         if (!err)
1552                 return 0;
1553
1554         RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d", err);
1555         rte_free(rx_adapter->epoll_events);
1556 error:
1557         rte_ring_free(rx_adapter->intr_ring);
1558         rx_adapter->intr_ring = NULL;
1559         rx_adapter->epoll_events = NULL;
1560         return err;
1561 }
1562
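/* Cancel and join the Rx interrupt thread and release its resources */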
1563 static int
1564 rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
1565 {
1566         int err;
1567
1568         err = pthread_cancel(rx_adapter->rx_intr_thread);
1569         if (err)
1570                 RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d",
1571                                 err);
1572
1573         err = pthread_join(rx_adapter->rx_intr_thread, NULL);
1574         if (err)
1575                 RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d", err);
1576
1577         rte_free(rx_adapter->epoll_events);
1578         rte_ring_free(rx_adapter->intr_ring);
1579         rx_adapter->intr_ring = NULL;
1580         rx_adapter->epoll_events = NULL;
1581         return 0;
1582 }
1583
1584 static int
1585 rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
1586 {
1587         int ret;
1588
1589         if (rx_adapter->num_rx_intr == 0)
1590                 return 0;
1591
1592         ret = rxa_destroy_intr_thread(rx_adapter);
1593         if (ret)
1594                 return ret;
1595
1596         close(rx_adapter->epd);
1597         rx_adapter->epd = INIT_FD;
1598
1599         return ret;
1600 }
1601
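/*
 * Disable the Rx interrupt for a queue and remove its epoll
 * registration; clears the shared or per-queue enable flag set by
 * rxa_config_intr().
 */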
1602 static int
1603 rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
1604         struct eth_device_info *dev_info,
1605         uint16_t rx_queue_id)
1606 {
1607         int err;
1608         uint16_t eth_dev_id = dev_info->dev->data->port_id;
1609         int sintr = rxa_shared_intr(dev_info, rx_queue_id);
1610
1611         err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
1612         if (err) {
1613                 RTE_EDEV_LOG_ERR("Could not disable interrupt for Rx queue %u",
1614                         rx_queue_id);
1615                 return err;
1616         }
1617
1618         err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
1619                                         rx_adapter->epd,
1620                                         RTE_INTR_EVENT_DEL,
1621                                         0);
1622         if (err)
1623                 RTE_EDEV_LOG_ERR("Interrupt event deletion failed %d", err);
1624
1625         if (sintr)
1626                 dev_info->shared_intr_enabled = 0;
1627         else
1628                 dev_info->rx_queue[rx_queue_id].intr_enabled = 0;
1629         return err;
1630 }
1631
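/*
 * Remove one queue (or all queues when rx_queue_id is -1) from interrupt
 * mode: shared interrupts are only disabled once the last queue using
 * them is removed, and the queue is dropped from the intr_queue[] list.
 */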
1632 static int
1633 rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
1634                 struct eth_device_info *dev_info,
1635                 int rx_queue_id)
1636 {
1637         int err;
1638         int i;
1639         int s;
1640
1641         if (dev_info->nb_rx_intr == 0)
1642                 return 0;
1643
1644         err = 0;
1645         if (rx_queue_id == -1) {
1646                 s = dev_info->nb_shared_intr;
1647                 for (i = 0; i < dev_info->nb_rx_intr; i++) {
1648                         int sintr;
1649                         uint16_t q;
1650
1651                         q = dev_info->intr_queue[i];
1652                         sintr = rxa_shared_intr(dev_info, q);
1653                         s -= sintr;
1654
1655                         if (!sintr || s == 0) {
1657                                 err = rxa_disable_intr(rx_adapter, dev_info,
1658                                                 q);
1659                                 if (err)
1660                                         return err;
1661                                 rxa_intr_ring_del_entries(rx_adapter, dev_info,
1662                                                         q);
1663                         }
1664                 }
1665         } else {
1666                 if (!rxa_intr_queue(dev_info, rx_queue_id))
1667                         return 0;
1668                 if (!rxa_shared_intr(dev_info, rx_queue_id) ||
1669                                 dev_info->nb_shared_intr == 1) {
1670                         err = rxa_disable_intr(rx_adapter, dev_info,
1671                                         rx_queue_id);
1672                         if (err)
1673                                 return err;
1674                         rxa_intr_ring_del_entries(rx_adapter, dev_info,
1675                                                 rx_queue_id);
1676                 }
1677
1678                 for (i = 0; i < dev_info->nb_rx_intr; i++) {
1679                         if (dev_info->intr_queue[i] == rx_queue_id) {
1680                                 for (; i < dev_info->nb_rx_intr - 1; i++)
1681                                         dev_info->intr_queue[i] =
1682                                                 dev_info->intr_queue[i + 1];
1683                                 break;
1684                         }
1685                 }
1686         }
1687
1688         return err;
1689 }
1690
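/*
 * Switch a queue to interrupt mode: allocate the per-device intr_queue[]
 * array on first use, create the epoll fd if needed, register the
 * (port, queue) pair as epoll user data and enable the Rx interrupt.
 * On failure, the steps taken so far are rolled back.
 */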
1691 static int
1692 rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
1693         struct eth_device_info *dev_info,
1694         uint16_t rx_queue_id)
1695 {
1696         int err, err1;
1697         uint16_t eth_dev_id = dev_info->dev->data->port_id;
1698         union queue_data qd;
1699         int init_fd;
1700         uint16_t *intr_queue;
1701         int sintr = rxa_shared_intr(dev_info, rx_queue_id);
1702
1703         if (rxa_intr_queue(dev_info, rx_queue_id))
1704                 return 0;
1705
1706         intr_queue = dev_info->intr_queue;
1707         if (dev_info->intr_queue == NULL) {
1708                 size_t len =
1709                         dev_info->dev->data->nb_rx_queues * sizeof(uint16_t);
1710                 dev_info->intr_queue =
1711                         rte_zmalloc_socket(
1712                                 rx_adapter->mem_name,
1713                                 len,
1714                                 0,
1715                                 rx_adapter->socket_id);
1716                 if (dev_info->intr_queue == NULL)
1717                         return -ENOMEM;
1718         }
1719
1720         init_fd = rx_adapter->epd;
1721         err = rxa_init_epd(rx_adapter);
1722         if (err)
1723                 goto err_free_queue;
1724
1725         qd.port = eth_dev_id;
1726         qd.queue = rx_queue_id;
1727
1728         err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
1729                                         rx_adapter->epd,
1730                                         RTE_INTR_EVENT_ADD,
1731                                         qd.ptr);
1732         if (err) {
1733                 RTE_EDEV_LOG_ERR("Failed to add interrupt event for"
1734                         " Rx Queue %u err %d", rx_queue_id, err);
1735                 goto err_del_fd;
1736         }
1737
1738         err = rte_eth_dev_rx_intr_enable(eth_dev_id, rx_queue_id);
1739         if (err) {
1740                 RTE_EDEV_LOG_ERR("Could not enable interrupt for"
1741                                 " Rx Queue %u err %d", rx_queue_id, err);
1742
1743                 goto err_del_event;
1744         }
1745
1746         err = rxa_create_intr_thread(rx_adapter);
1747         if (!err)  {
1748                 if (sintr)
1749                         dev_info->shared_intr_enabled = 1;
1750                 else
1751                         dev_info->rx_queue[rx_queue_id].intr_enabled = 1;
1752                 return 0;
1753         }
1754
1756         err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
1757         if (err)
1758                 RTE_EDEV_LOG_ERR("Could not disable interrupt for"
1759                                 " Rx Queue %u err %d", rx_queue_id, err);
1760 err_del_event:
1761         err1 = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
1762                                         rx_adapter->epd,
1763                                         RTE_INTR_EVENT_DEL,
1764                                         0);
1765         if (err1) {
1766                 RTE_EDEV_LOG_ERR("Could not delete event for"
1767                                 " Rx Queue %u err %d", rx_queue_id, err1);
1768         }
1769 err_del_fd:
1770         if (init_fd == INIT_FD) {
1771                 close(rx_adapter->epd);
1772                 rx_adapter->epd = INIT_FD;
1773         }
1774 err_free_queue:
1775         if (intr_queue == NULL)
1776                 rte_free(dev_info->intr_queue);
1777
1778         return err;
1779 }
1780
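/*
 * Configure interrupts for one queue or, when rx_queue_id is -1, for all
 * queues of the device. Queues that map to an already configured shared
 * interrupt are skipped; on error, the interrupt setup performed by this
 * call is unwound.
 */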
1781 static int
1782 rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
1783         struct eth_device_info *dev_info,
1784         int rx_queue_id)
1785
1786 {
1787         int i, j, err;
1788         int si = -1;
1789         int shared_done = (dev_info->nb_shared_intr > 0);
1790
1791         if (rx_queue_id != -1) {
1792                 if (rxa_shared_intr(dev_info, rx_queue_id) && shared_done)
1793                         return 0;
1794                 return rxa_config_intr(rx_adapter, dev_info, rx_queue_id);
1795         }
1796
1797         err = 0;
1798         for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++) {
1799
1800                 if (rxa_shared_intr(dev_info, i) && shared_done)
1801                         continue;
1802
1803                 err = rxa_config_intr(rx_adapter, dev_info, i);
1804
1805                 shared_done = err == 0 && rxa_shared_intr(dev_info, i);
1806                 if (shared_done) {
1807                         si = i;
1808                         dev_info->shared_intr_enabled = 1;
1809                 }
1810                 if (err)
1811                         break;
1812         }
1813
1814         if (err == 0)
1815                 return 0;
1816
1817         shared_done = (dev_info->nb_shared_intr > 0);
1818         for (j = 0; j < i; j++) {
1819                 if (rxa_intr_queue(dev_info, j))
1820                         continue;
1821                 if (rxa_shared_intr(dev_info, j) && si != j)
1822                         continue;
1823                 err = rxa_disable_intr(rx_adapter, dev_info, j);
1824                 if (err)
1825                         break;
1826
1827         }
1828
1829         return err;
1830 }
1831
1832
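/*
 * One-time service setup for an adapter that uses the software service
 * function: register the service component, run the configuration
 * callback to obtain the event port id and max_nb_rx, and record them in
 * the adapter.
 */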
1833 static int
1834 rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
1835 {
1836         int ret;
1837         struct rte_service_spec service;
1838         struct rte_event_eth_rx_adapter_conf rx_adapter_conf;
1839
1840         if (rx_adapter->service_inited)
1841                 return 0;
1842
1843         memset(&service, 0, sizeof(service));
1844         snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN,
1845                 "rte_event_eth_rx_adapter_%d", id);
1846         service.socket_id = rx_adapter->socket_id;
1847         service.callback = rxa_service_func;
1848         service.callback_userdata = rx_adapter;
1849         /* Service function handles locking for queue add/del updates */
1850         service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
1851         ret = rte_service_component_register(&service, &rx_adapter->service_id);
1852         if (ret) {
1853                 RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
1854                         service.name, ret);
1855                 return ret;
1856         }
1857
1858         ret = rx_adapter->conf_cb(id, rx_adapter->eventdev_id,
1859                 &rx_adapter_conf, rx_adapter->conf_arg);
1860         if (ret) {
1861                 RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
1862                         ret);
1863                 goto err_done;
1864         }
1865         rx_adapter->event_port_id = rx_adapter_conf.event_port_id;
1866         rx_adapter->max_nb_rx = rx_adapter_conf.max_nb_rx;
1867         rx_adapter->service_inited = 1;
1868         rx_adapter->epd = INIT_FD;
1869         return 0;
1870
1871 err_done:
1872         rte_service_component_unregister(rx_adapter->service_id);
1873         return ret;
1874 }
1875
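/*
 * Mark a queue (or all queues when rx_queue_id is -1) as enabled or
 * disabled on the adapter and keep the adapter and per-device queue
 * counts in sync.
 */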
1876 static void
1877 rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter,
1878                 struct eth_device_info *dev_info,
1879                 int32_t rx_queue_id,
1880                 uint8_t add)
1881 {
1882         struct eth_rx_queue_info *queue_info;
1883         int enabled;
1884         uint16_t i;
1885
1886         if (dev_info->rx_queue == NULL)
1887                 return;
1888
1889         if (rx_queue_id == -1) {
1890                 for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
1891                         rxa_update_queue(rx_adapter, dev_info, i, add);
1892         } else {
1893                 queue_info = &dev_info->rx_queue[rx_queue_id];
1894                 enabled = queue_info->queue_enabled;
1895                 if (add) {
1896                         rx_adapter->nb_queues += !enabled;
1897                         dev_info->nb_dev_queues += !enabled;
1898                 } else {
1899                         rx_adapter->nb_queues -= enabled;
1900                         dev_info->nb_dev_queues -= enabled;
1901                 }
1902                 queue_info->queue_enabled = !!add;
1903         }
1904 }
1905
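/*
 * Record the vectorization parameters for a queue and derive the event
 * word used for vector events; if the configured flow id is zero, one is
 * synthesized from the queue and port ids.
 */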
1906 static void
1907 rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
1908                     uint64_t vector_ns, struct rte_mempool *mp, uint32_t qid,
1909                     uint16_t port_id)
1910 {
1911 #define NSEC2TICK(__ns, __freq) (((__ns) * (__freq)) / 1E9)
1912         struct eth_rx_vector_data *vector_data;
1913         uint32_t flow_id;
1914
1915         vector_data = &queue_info->vector_data;
1916         vector_data->max_vector_count = vector_count;
1917         vector_data->port = port_id;
1918         vector_data->queue = qid;
1919         vector_data->vector_pool = mp;
1920         vector_data->vector_timeout_ticks =
1921                 NSEC2TICK(vector_ns, rte_get_timer_hz());
1922         vector_data->ts = 0;
1923         flow_id = queue_info->event & 0xFFFFF;
1924         flow_id =
1925                 flow_id == 0 ? (qid & 0xFFF) | (port_id & 0xFF) << 12 : flow_id;
1926         vector_data->event = (queue_info->event & ~0xFFFFF) | flow_id;
1927 }
1928
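/*
 * Software path queue delete: flush any partially filled event vectors
 * for the queue, update the poll/interrupt bookkeeping and free the
 * per-queue event buffer when one was allocated.
 */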
1929 static void
1930 rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
1931         struct eth_device_info *dev_info,
1932         int32_t rx_queue_id)
1933 {
1934         struct eth_rx_vector_data *vec;
1935         int pollq;
1936         int intrq;
1937         int sintrq;
1938
1939
1940         if (rx_adapter->nb_queues == 0)
1941                 return;
1942
1943         if (rx_queue_id == -1) {
1944                 uint16_t nb_rx_queues;
1945                 uint16_t i;
1946
1947                 nb_rx_queues = dev_info->dev->data->nb_rx_queues;
1948                 for (i = 0; i < nb_rx_queues; i++)
1949                         rxa_sw_del(rx_adapter, dev_info, i);
1950                 return;
1951         }
1952
1953         /* Push all the partial event vectors to event device. */
1954         TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {
1955                 if (vec->queue != rx_queue_id)
1956                         continue;
1957                 rxa_vector_expire(vec, rx_adapter);
1958                 TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
1959         }
1960
1961         pollq = rxa_polled_queue(dev_info, rx_queue_id);
1962         intrq = rxa_intr_queue(dev_info, rx_queue_id);
1963         sintrq = rxa_shared_intr(dev_info, rx_queue_id);
1964         rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 0);
1965         rx_adapter->num_rx_polled -= pollq;
1966         dev_info->nb_rx_poll -= pollq;
1967         rx_adapter->num_rx_intr -= intrq;
1968         dev_info->nb_rx_intr -= intrq;
1969         dev_info->nb_shared_intr -= intrq && sintrq;
1970         if (rx_adapter->use_queue_event_buf) {
1971                 struct rte_eth_event_enqueue_buffer *event_buf =
1972                         dev_info->rx_queue[rx_queue_id].event_buf;
1973                 rte_free(event_buf->events);
1974                 rte_free(event_buf);
1975                 dev_info->rx_queue[rx_queue_id].event_buf = NULL;
1976         }
1977 }
1978
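/*
 * Software path queue add: initialize the per-queue event template,
 * optional vectorization state and poll/interrupt bookkeeping, and
 * allocate the per-queue event buffer when the adapter was created with
 * use_queue_event_buf set.
 */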
1979 static int
1980 rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
1981         struct eth_device_info *dev_info,
1982         int32_t rx_queue_id,
1983         const struct rte_event_eth_rx_adapter_queue_conf *conf)
1984 {
1985         struct eth_rx_queue_info *queue_info;
1986         const struct rte_event *ev = &conf->ev;
1987         int pollq;
1988         int intrq;
1989         int sintrq;
1990         struct rte_event *qi_ev;
1991         struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL;
1992         uint16_t eth_dev_id = dev_info->dev->data->port_id;
1993         int ret;
1994
1995         if (rx_queue_id == -1) {
1996                 uint16_t nb_rx_queues;
1997                 uint16_t i;
1998
1999                 nb_rx_queues = dev_info->dev->data->nb_rx_queues;
2000                 for (i = 0; i < nb_rx_queues; i++) {
2001                         ret = rxa_add_queue(rx_adapter, dev_info, i, conf);
2002                         if (ret)
2003                                 return ret;
2004                 }
2005                 return 0;
2006         }
2007
2008         pollq = rxa_polled_queue(dev_info, rx_queue_id);
2009         intrq = rxa_intr_queue(dev_info, rx_queue_id);
2010         sintrq = rxa_shared_intr(dev_info, rx_queue_id);
2011
2012         queue_info = &dev_info->rx_queue[rx_queue_id];
2013         queue_info->wt = conf->servicing_weight;
2014
2015         qi_ev = (struct rte_event *)&queue_info->event;
2016         qi_ev->event = ev->event;
2017         qi_ev->op = RTE_EVENT_OP_NEW;
2018         qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
2019         qi_ev->sub_event_type = 0;
2020
2021         if (conf->rx_queue_flags &
2022                         RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {
2023                 queue_info->flow_id_mask = ~0;
2024         } else
2025                 qi_ev->flow_id = 0;
2026
2027         if (conf->rx_queue_flags &
2028             RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
2029                 queue_info->ena_vector = 1;
2030                 qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
2031                 rxa_set_vector_data(queue_info, conf->vector_sz,
2032                                     conf->vector_timeout_ns, conf->vector_mp,
2033                                     rx_queue_id, dev_info->dev->data->port_id);
2034                 rx_adapter->ena_vector = 1;
2035                 rx_adapter->vector_tmo_ticks =
2036                         rx_adapter->vector_tmo_ticks ?
2037                         RTE_MIN(queue_info->vector_data.vector_timeout_ticks >> 1,
2038                                 rx_adapter->vector_tmo_ticks) :
2039                         queue_info->vector_data.vector_timeout_ticks >> 1;
2043         }
2044
2045         rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
2046         if (rxa_polled_queue(dev_info, rx_queue_id)) {
2047                 rx_adapter->num_rx_polled += !pollq;
2048                 dev_info->nb_rx_poll += !pollq;
2049                 rx_adapter->num_rx_intr -= intrq;
2050                 dev_info->nb_rx_intr -= intrq;
2051                 dev_info->nb_shared_intr -= intrq && sintrq;
2052         }
2053
2054         if (rxa_intr_queue(dev_info, rx_queue_id)) {
2055                 rx_adapter->num_rx_polled -= pollq;
2056                 dev_info->nb_rx_poll -= pollq;
2057                 rx_adapter->num_rx_intr += !intrq;
2058                 dev_info->nb_rx_intr += !intrq;
2059                 dev_info->nb_shared_intr += !intrq && sintrq;
2060                 if (dev_info->nb_shared_intr == 1) {
2061                         if (dev_info->multi_intr_cap)
2062                                 dev_info->next_q_idx =
2063                                         RTE_MAX_RXTX_INTR_VEC_ID - 1;
2064                         else
2065                                 dev_info->next_q_idx = 0;
2066                 }
2067         }
2068
2069         if (!rx_adapter->use_queue_event_buf)
2070                 return 0;
2071
2072         new_rx_buf = rte_zmalloc_socket("rx_buffer_meta",
2073                                 sizeof(*new_rx_buf), 0,
2074                                 rte_eth_dev_socket_id(eth_dev_id));
2075         if (new_rx_buf == NULL) {
2076                 RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for "
2077                                  "dev_id: %d queue_id: %d",
2078                                  eth_dev_id, rx_queue_id);
2079                 return -ENOMEM;
2080         }
2081
2082         new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE);
2083         new_rx_buf->events_size += (2 * BATCH_SIZE);
2084         new_rx_buf->events = rte_zmalloc_socket("rx_buffer",
2085                                 sizeof(struct rte_event) *
2086                                 new_rx_buf->events_size, 0,
2087                                 rte_eth_dev_socket_id(eth_dev_id));
2088         if (new_rx_buf->events == NULL) {
2089                 rte_free(new_rx_buf);
2090                 RTE_EDEV_LOG_ERR("Failed to allocate event buffer for "
2091                                  "dev_id: %d queue_id: %d",
2092                                  eth_dev_id, rx_queue_id);
2093                 return -ENOMEM;
2094         }
2095
2096         queue_info->event_buf = new_rx_buf;
2097
2098         return 0;
2099 }
2100
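/*
 * Add a queue (or all queues of a port) to the software serviced
 * adapter: recompute the poll and WRR arrays, move queues between poll
 * and interrupt mode as the servicing weight dictates, and install the
 * new schedule on success.
 */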
2101 static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
2102                 uint16_t eth_dev_id,
2103                 int rx_queue_id,
2104                 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2105 {
2106         struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
2107         struct rte_event_eth_rx_adapter_queue_conf temp_conf;
2108         int ret;
2109         struct eth_rx_poll_entry *rx_poll;
2110         struct eth_rx_queue_info *rx_queue;
2111         uint32_t *rx_wrr;
2112         uint16_t nb_rx_queues;
2113         uint32_t nb_rx_poll, nb_wrr;
2114         uint32_t nb_rx_intr;
2115         int num_intr_vec;
2116         uint16_t wt;
2117
2118         if (queue_conf->servicing_weight == 0) {
2119                 struct rte_eth_dev_data *data = dev_info->dev->data;
2120
2121                 temp_conf = *queue_conf;
2122                 if (!data->dev_conf.intr_conf.rxq) {
2123                         /* If Rx interrupts are disabled set wt = 1 */
2124                         temp_conf.servicing_weight = 1;
2125                 }
2126                 queue_conf = &temp_conf;
2127
2128                 if (queue_conf->servicing_weight == 0 &&
2129                     rx_adapter->use_queue_event_buf) {
2130
2131                         RTE_EDEV_LOG_ERR("Use of queue level event buffer "
2132                                          "not supported for interrupt queues "
2133                                          "dev_id: %d queue_id: %d",
2134                                          eth_dev_id, rx_queue_id);
2135                         return -EINVAL;
2136                 }
2137         }
2138
2139         nb_rx_queues = dev_info->dev->data->nb_rx_queues;
2140         rx_queue = dev_info->rx_queue;
2141         wt = queue_conf->servicing_weight;
2142
2143         if (dev_info->rx_queue == NULL) {
2144                 dev_info->rx_queue =
2145                     rte_zmalloc_socket(rx_adapter->mem_name,
2146                                        nb_rx_queues *
2147                                        sizeof(struct eth_rx_queue_info), 0,
2148                                        rx_adapter->socket_id);
2149                 if (dev_info->rx_queue == NULL)
2150                         return -ENOMEM;
2151         }
2152         rx_wrr = NULL;
2153         rx_poll = NULL;
2154
2155         rxa_calc_nb_post_add(rx_adapter, dev_info, rx_queue_id,
2156                         queue_conf->servicing_weight,
2157                         &nb_rx_poll, &nb_rx_intr, &nb_wrr);
2158
2159         if (dev_info->dev->intr_handle)
2160                 dev_info->multi_intr_cap =
2161                         rte_intr_cap_multiple(dev_info->dev->intr_handle);
2162
2163         ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
2164                                 &rx_poll, &rx_wrr);
2165         if (ret)
2166                 goto err_free_rxqueue;
2167
2168         if (wt == 0) {
2169                 num_intr_vec = rxa_nb_intr_vect(dev_info, rx_queue_id, 1);
2170
2171                 ret = rxa_intr_ring_check_avail(rx_adapter, num_intr_vec);
2172                 if (ret)
2173                         goto err_free_rxqueue;
2174
2175                 ret = rxa_add_intr_queue(rx_adapter, dev_info, rx_queue_id);
2176                 if (ret)
2177                         goto err_free_rxqueue;
2178         } else {
2179
2180                 num_intr_vec = 0;
2181                 if (rx_adapter->num_rx_intr > nb_rx_intr) {
2182                         num_intr_vec = rxa_nb_intr_vect(dev_info,
2183                                                 rx_queue_id, 0);
2184                         /* interrupt based queues are being converted to
2185                          * poll mode queues, delete the interrupt configuration
2186                          * for those.
2187                          */
2188                         ret = rxa_del_intr_queue(rx_adapter,
2189                                                 dev_info, rx_queue_id);
2190                         if (ret)
2191                                 goto err_free_rxqueue;
2192                 }
2193         }
2194
2195         if (nb_rx_intr == 0) {
2196                 ret = rxa_free_intr_resources(rx_adapter);
2197                 if (ret)
2198                         goto err_free_rxqueue;
2199         }
2200
2201         if (wt == 0) {
2202                 uint16_t i;
2203
2204                 if (rx_queue_id == -1) {
2205                         for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
2206                                 dev_info->intr_queue[i] = i;
2207                 } else {
2208                         if (!rxa_intr_queue(dev_info, rx_queue_id))
2209                                 dev_info->intr_queue[nb_rx_intr - 1] =
2210                                         rx_queue_id;
2211                 }
2212         }
2213
2216         ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
2217         if (ret)
2218                 goto err_free_rxqueue;
2219         rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
2220
2221         rte_free(rx_adapter->eth_rx_poll);
2222         rte_free(rx_adapter->wrr_sched);
2223
2224         rx_adapter->eth_rx_poll = rx_poll;
2225         rx_adapter->wrr_sched = rx_wrr;
2226         rx_adapter->wrr_len = nb_wrr;
2227         rx_adapter->num_intr_vec += num_intr_vec;
2228         return 0;
2229
2230 err_free_rxqueue:
2231         if (rx_queue == NULL) {
2232                 rte_free(dev_info->rx_queue);
2233                 dev_info->rx_queue = NULL;
2234         }
2235
2236         rte_free(rx_poll);
2237         rte_free(rx_wrr);
2238
2239         return ret;
2240 }
2241
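/*
 * Common start/stop handler: invokes the driver start/stop callback for
 * devices with an internal event port and toggles the service run state
 * for the software serviced devices.
 */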
2242 static int
2243 rxa_ctrl(uint8_t id, int start)
2244 {
2245         struct rte_event_eth_rx_adapter *rx_adapter;
2246         struct rte_eventdev *dev;
2247         struct eth_device_info *dev_info;
2248         uint32_t i;
2249         int use_service = 0;
2250         int stop = !start;
2251
2252         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2253         rx_adapter = rxa_id_to_adapter(id);
2254         if (rx_adapter == NULL)
2255                 return -EINVAL;
2256
2257         dev = &rte_eventdevs[rx_adapter->eventdev_id];
2258
2259         RTE_ETH_FOREACH_DEV(i) {
2260                 dev_info = &rx_adapter->eth_devices[i];
2261                 /* if start, check for num dev queues */
2262                 if (start && !dev_info->nb_dev_queues)
2263                         continue;
2264                 /* if stop check if dev has been started */
2265                 if (stop && !dev_info->dev_rx_started)
2266                         continue;
2267                 use_service |= !dev_info->internal_event_port;
2268                 dev_info->dev_rx_started = start;
2269                 if (dev_info->internal_event_port == 0)
2270                         continue;
2271                 start ? (*dev->dev_ops->eth_rx_adapter_start)(dev,
2272                                                 &rte_eth_devices[i]) :
2273                         (*dev->dev_ops->eth_rx_adapter_stop)(dev,
2274                                                 &rte_eth_devices[i]);
2275         }
2276
2277         if (use_service) {
2278                 rte_spinlock_lock(&rx_adapter->rx_lock);
2279                 rx_adapter->rxa_started = start;
2280                 rte_service_runstate_set(rx_adapter->service_id, start);
2281                 rte_spinlock_unlock(&rx_adapter->rx_lock);
2282         }
2283
2284         return 0;
2285 }
2286
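/*
 * Common adapter create path shared by the create() variants: allocates
 * the adapter, its per-port device table and, unless per-queue event
 * buffers are requested, the adapter-level enqueue buffer; it also
 * converts the default RSS key to big endian for flow id hashing and
 * registers the mbuf timestamp dynamic field.
 */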
2287 static int
2288 rxa_create(uint8_t id, uint8_t dev_id,
2289            struct rte_event_eth_rx_adapter_params *rxa_params,
2290            rte_event_eth_rx_adapter_conf_cb conf_cb,
2291            void *conf_arg)
2292 {
2293         struct rte_event_eth_rx_adapter *rx_adapter;
2294         struct rte_eth_event_enqueue_buffer *buf;
2295         struct rte_event *events;
2296         int ret;
2297         int socket_id;
2298         uint16_t i;
2299         char mem_name[ETH_RX_ADAPTER_MEM_NAME_LEN];
2300         const uint8_t default_rss_key[] = {
2301                 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
2302                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
2303                 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
2304                 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
2305                 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
2306         };
2307
2308         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2309         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
2310
2311         if (conf_cb == NULL)
2312                 return -EINVAL;
2313
2314         if (event_eth_rx_adapter == NULL) {
2315                 ret = rte_event_eth_rx_adapter_init();
2316                 if (ret)
2317                         return ret;
2318         }
2319
2320         rx_adapter = rxa_id_to_adapter(id);
2321         if (rx_adapter != NULL) {
2322                 RTE_EDEV_LOG_ERR("Eth Rx adapter exists id = %" PRIu8, id);
2323                 return -EEXIST;
2324         }
2325
2326         socket_id = rte_event_dev_socket_id(dev_id);
2327         snprintf(mem_name, ETH_RX_ADAPTER_MEM_NAME_LEN,
2328                 "rte_event_eth_rx_adapter_%d",
2329                 id);
2330
2331         rx_adapter = rte_zmalloc_socket(mem_name, sizeof(*rx_adapter),
2332                         RTE_CACHE_LINE_SIZE, socket_id);
2333         if (rx_adapter == NULL) {
2334                 RTE_EDEV_LOG_ERR("failed to get mem for rx adapter");
2335                 return -ENOMEM;
2336         }
2337
2338         rx_adapter->eventdev_id = dev_id;
2339         rx_adapter->socket_id = socket_id;
2340         rx_adapter->conf_cb = conf_cb;
2341         rx_adapter->conf_arg = conf_arg;
2342         rx_adapter->id = id;
2343         TAILQ_INIT(&rx_adapter->vector_list);
2344         strcpy(rx_adapter->mem_name, mem_name);
2345         rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
2346                                         RTE_MAX_ETHPORTS *
2347                                         sizeof(struct eth_device_info), 0,
2348                                         socket_id);
2349         rte_convert_rss_key((const uint32_t *)default_rss_key,
2350                         (uint32_t *)rx_adapter->rss_key_be,
2351                             RTE_DIM(default_rss_key));
2352
2353         if (rx_adapter->eth_devices == NULL) {
2354                 RTE_EDEV_LOG_ERR("failed to get mem for eth devices");
2355                 rte_free(rx_adapter);
2356                 return -ENOMEM;
2357         }
2358
2359         rte_spinlock_init(&rx_adapter->rx_lock);
2360
2361         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
2362                 rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
2363
2364         /* Rx adapter event buffer allocation */
2365         rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf;
2366
2367         if (!rx_adapter->use_queue_event_buf) {
2368                 buf = &rx_adapter->event_enqueue_buffer;
2369                 buf->events_size = rxa_params->event_buf_size;
2370
2371                 events = rte_zmalloc_socket(rx_adapter->mem_name,
2372                                             buf->events_size * sizeof(*events),
2373                                             0, socket_id);
2374                 if (events == NULL) {
2375                         RTE_EDEV_LOG_ERR("Failed to allocate memory "
2376                                          "for adapter event buffer");
2377                         rte_free(rx_adapter->eth_devices);
2378                         rte_free(rx_adapter);
2379                         return -ENOMEM;
2380                 }
2381
2382                 rx_adapter->event_enqueue_buffer.events = events;
2383         }
2384
2385         event_eth_rx_adapter[id] = rx_adapter;
2386
2387         if (conf_cb == rxa_default_conf_cb)
2388                 rx_adapter->default_cb_arg = 1;
2389
2390         if (rte_mbuf_dyn_rx_timestamp_register(
2391                         &event_eth_rx_timestamp_dynfield_offset,
2392                         &event_eth_rx_timestamp_dynflag) != 0) {
2393                 RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf");
                /* Undo the allocations done above so a failed create does
                 * not leak memory or leave a stale adapter slot behind.
                 */
                if (!rx_adapter->use_queue_event_buf)
                        rte_free(rx_adapter->event_enqueue_buffer.events);
                rte_free(rx_adapter->eth_devices);
                rte_free(rx_adapter);
                event_eth_rx_adapter[id] = NULL;
2394                 return -rte_errno;
2395         }
2396
2397         rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb,
2398                 conf_arg);
2399         return 0;
2400 }
2401
2402 int
2403 rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
2404                                 rte_event_eth_rx_adapter_conf_cb conf_cb,
2405                                 void *conf_arg)
2406 {
2407         struct rte_event_eth_rx_adapter_params rxa_params = {0};
2408
2409         /* use default values for adapter params */
2410         rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE;
2411         rxa_params.use_queue_event_buf = false;
2412
2413         return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg);
2414 }
2415
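/*
 * Typical usage sketch; the adapter id, event device id and sizes below
 * are illustrative application choices, not requirements:
 *
 *	struct rte_event_port_conf pconf = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 16,
 *		.enqueue_depth = 16,
 *	};
 *	struct rte_event_eth_rx_adapter_params p = {
 *		.event_buf_size = 192,
 *		.use_queue_event_buf = false,
 *	};
 *
 *	ret = rte_event_eth_rx_adapter_create_with_params(0, 0, &pconf, &p);
 */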
2416 int
2417 rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id,
2418                         struct rte_event_port_conf *port_config,
2419                         struct rte_event_eth_rx_adapter_params *rxa_params)
2420 {
2421         struct rte_event_port_conf *pc;
2422         int ret;
2423         struct rte_event_eth_rx_adapter_params temp_params = {0};
2424
2425         if (port_config == NULL)
2426                 return -EINVAL;
2427
2428         if (rxa_params == NULL) {
2429                 /* use default values if rxa_params is NULL */
2430                 rxa_params = &temp_params;
2431                 rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE;
2432                 rxa_params->use_queue_event_buf = false;
2433         } else if ((!rxa_params->use_queue_event_buf &&
2434                     rxa_params->event_buf_size == 0) ||
2435                    (rxa_params->use_queue_event_buf &&
2436                     rxa_params->event_buf_size != 0)) {
2437                 RTE_EDEV_LOG_ERR("Invalid adapter params");
2438                 return -EINVAL;
2439         } else if (!rxa_params->use_queue_event_buf) {
2440                 /* Adjust the event buffer size with the BATCH_SIZE used for
2441                  * fetching packets from NIC Rx queues to get full buffer
2442                  * utilization and prevent unnecessary rollovers.
2443                  */
2444
2445                 rxa_params->event_buf_size =
2446                         RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE);
2447                 rxa_params->event_buf_size += (2 * BATCH_SIZE);
2448         }
2449
2450         pc = rte_malloc(NULL, sizeof(*pc), 0);
2451         if (pc == NULL)
2452                 return -ENOMEM;
2453
2454         *pc = *port_config;
2455
2456         ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc);
2457         if (ret)
2458                 rte_free(pc);
2459
2460         return ret;
2461 }
2462
2463 int
2464 rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
2465                 struct rte_event_port_conf *port_config)
2466 {
2467         struct rte_event_port_conf *pc;
2468         int ret;
2469
2470         if (port_config == NULL)
2471                 return -EINVAL;
2472
2473         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2474
2475         pc = rte_malloc(NULL, sizeof(*pc), 0);
2476         if (pc == NULL)
2477                 return -ENOMEM;
2478         *pc = *port_config;
2479
2480         ret = rte_event_eth_rx_adapter_create_ext(id, dev_id,
2481                                         rxa_default_conf_cb,
2482                                         pc);
2483         if (ret)
2484                 rte_free(pc);
2485         return ret;
2486 }
2487
2488 int
2489 rte_event_eth_rx_adapter_free(uint8_t id)
2490 {
2491         struct rte_event_eth_rx_adapter *rx_adapter;
2492
2493         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2494
2495         rx_adapter = rxa_id_to_adapter(id);
2496         if (rx_adapter == NULL)
2497                 return -EINVAL;
2498
2499         if (rx_adapter->nb_queues) {
2500                 RTE_EDEV_LOG_ERR("%" PRIu16 " Rx queues not deleted",
2501                                 rx_adapter->nb_queues);
2502                 return -EBUSY;
2503         }
2504
2505         if (rx_adapter->default_cb_arg)
2506                 rte_free(rx_adapter->conf_arg);
2507         rte_free(rx_adapter->eth_devices);
2508         if (!rx_adapter->use_queue_event_buf)
2509                 rte_free(rx_adapter->event_enqueue_buffer.events);
2510         rte_free(rx_adapter);
2511         event_eth_rx_adapter[id] = NULL;
2512
2513         rte_eventdev_trace_eth_rx_adapter_free(id);
2514         return 0;
2515 }
2516
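/*
 * Usage sketch for adding all Rx queues of a port; the ids, scheduling
 * type and servicing weight are illustrative only:
 *
 *	struct rte_event_eth_rx_adapter_queue_conf qconf = {
 *		.rx_queue_flags = 0,
 *		.servicing_weight = 1,
 *		.ev = {
 *			.queue_id = 0,
 *			.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		},
 *	};
 *
 *	ret = rte_event_eth_rx_adapter_queue_add(0, 0, -1, &qconf);
 */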
2517 int
2518 rte_event_eth_rx_adapter_queue_add(uint8_t id,
2519                 uint16_t eth_dev_id,
2520                 int32_t rx_queue_id,
2521                 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2522 {
2523         int ret;
2524         uint32_t cap;
2525         struct rte_event_eth_rx_adapter *rx_adapter;
2526         struct rte_eventdev *dev;
2527         struct eth_device_info *dev_info;
2528         struct rte_event_eth_rx_adapter_vector_limits limits;
2529
2530         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2531         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
2532
2533         rx_adapter = rxa_id_to_adapter(id);
2534         if ((rx_adapter == NULL) || (queue_conf == NULL))
2535                 return -EINVAL;
2536
2537         dev = &rte_eventdevs[rx_adapter->eventdev_id];
2538         ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
2539                                                 eth_dev_id,
2540                                                 &cap);
2541         if (ret) {
2542                 RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
2543                         " eth port %" PRIu16, id, eth_dev_id);
2544                 return ret;
2545         }
2546
2547         if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) == 0
2548                 && (queue_conf->rx_queue_flags &
2549                         RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
2550                 RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
2551                                 " eth port: %" PRIu16 " adapter id: %" PRIu8,
2552                                 eth_dev_id, id);
2553                 return -EINVAL;
2554         }
2555
2556         if (queue_conf->rx_queue_flags &
2557             RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
2558
2559                 if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0) {
2560                         RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
2561                                          " eth port: %" PRIu16
2562                                          " adapter id: %" PRIu8,
2563                                          eth_dev_id, id);
2564                         return -EINVAL;
2565                 }
2566
2567                 ret = rte_event_eth_rx_adapter_vector_limits_get(
2568                         rx_adapter->eventdev_id, eth_dev_id, &limits);
2569                 if (ret < 0) {
2570                         RTE_EDEV_LOG_ERR("Failed to get event device vector limits,"
2571                                          " eth port: %" PRIu16
2572                                          " adapter id: %" PRIu8,
2573                                          eth_dev_id, id);
2574                         return -EINVAL;
2575                 }
2576                 if (queue_conf->vector_sz < limits.min_sz ||
2577                     queue_conf->vector_sz > limits.max_sz ||
2578                     queue_conf->vector_timeout_ns < limits.min_timeout_ns ||
2579                     queue_conf->vector_timeout_ns > limits.max_timeout_ns ||
2580                     queue_conf->vector_mp == NULL) {
2581                         RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
2582                                          " eth port: %" PRIu16
2583                                          " adapter id: %" PRIu8,
2584                                          eth_dev_id, id);
2585                         return -EINVAL;
2586                 }
2587                 if (queue_conf->vector_mp->elt_size <
2588                     (sizeof(struct rte_event_vector) +
2589                      (sizeof(uintptr_t) * queue_conf->vector_sz))) {
2590                         RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
2591                                          " eth port: %" PRIu16
2592                                          " adapter id: %" PRIu8,
2593                                          eth_dev_id, id);
2594                         return -EINVAL;
2595                 }
2596         }
2597
2598         if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
2599                 (rx_queue_id != -1)) {
2600                 RTE_EDEV_LOG_ERR("Rx queues can only be connected to a single "
2601                         "event queue, eth port: %" PRIu16 " adapter id: %"
2602                         PRIu8, eth_dev_id, id);
2603                 return -EINVAL;
2604         }
2605
2606         if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
2607                         rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
2608                 RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
2609                          (uint16_t)rx_queue_id);
2610                 return -EINVAL;
2611         }
2612
2613         if ((rx_adapter->use_queue_event_buf &&
2614              queue_conf->event_buf_size == 0) ||
2615             (!rx_adapter->use_queue_event_buf &&
2616              queue_conf->event_buf_size != 0)) {
2617                 RTE_EDEV_LOG_ERR("Invalid event buffer size for the queue");
2618                 return -EINVAL;
2619         }
2620
2621         dev_info = &rx_adapter->eth_devices[eth_dev_id];
2622
2623         if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
2624                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_add,
2625                                         -ENOTSUP);
2626                 if (dev_info->rx_queue == NULL) {
2627                         dev_info->rx_queue =
2628                             rte_zmalloc_socket(rx_adapter->mem_name,
2629                                         dev_info->dev->data->nb_rx_queues *
2630                                         sizeof(struct eth_rx_queue_info), 0,
2631                                         rx_adapter->socket_id);
2632                         if (dev_info->rx_queue == NULL)
2633                                 return -ENOMEM;
2634                 }
2635
2636                 ret = (*dev->dev_ops->eth_rx_adapter_queue_add)(dev,
2637                                 &rte_eth_devices[eth_dev_id],
2638                                 rx_queue_id, queue_conf);
2639                 if (ret == 0) {
2640                         dev_info->internal_event_port = 1;
2641                         rxa_update_queue(rx_adapter,
2642                                         &rx_adapter->eth_devices[eth_dev_id],
2643                                         rx_queue_id,
2644                                         1);
2645                 }
2646         } else {
2647                 rte_spinlock_lock(&rx_adapter->rx_lock);
2648                 dev_info->internal_event_port = 0;
2649                 ret = rxa_init_service(rx_adapter, id);
2650                 if (ret == 0) {
2651                         uint32_t service_id = rx_adapter->service_id;
2652                         ret = rxa_sw_add(rx_adapter, eth_dev_id, rx_queue_id,
2653                                         queue_conf);
2654                         rte_service_component_runstate_set(service_id,
2655                                 rxa_sw_adapter_queue_count(rx_adapter));
2656                 }
2657                 rte_spinlock_unlock(&rx_adapter->rx_lock);
2658         }
2659
2660         rte_eventdev_trace_eth_rx_adapter_queue_add(id, eth_dev_id,
2661                 rx_queue_id, queue_conf, ret);
2662         if (ret)
2663                 return ret;
2664
2665         return 0;
2666 }
2667
2668 static int
2669 rxa_sw_vector_limits(struct rte_event_eth_rx_adapter_vector_limits *limits)
2670 {
2671         limits->max_sz = MAX_VECTOR_SIZE;
2672         limits->min_sz = MIN_VECTOR_SIZE;
2673         limits->max_timeout_ns = MAX_VECTOR_NS;
2674         limits->min_timeout_ns = MIN_VECTOR_NS;
2675
2676         return 0;
2677 }
2678
2679 int
2680 rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
2681                                 int32_t rx_queue_id)
2682 {
2683         int ret = 0;
2684         struct rte_eventdev *dev;
2685         struct rte_event_eth_rx_adapter *rx_adapter;
2686         struct eth_device_info *dev_info;
2687         uint32_t cap;
2688         uint32_t nb_rx_poll = 0;
2689         uint32_t nb_wrr = 0;
2690         uint32_t nb_rx_intr;
2691         struct eth_rx_poll_entry *rx_poll = NULL;
2692         uint32_t *rx_wrr = NULL;
2693         int num_intr_vec;
2694
2695         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2696         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
2697
2698         rx_adapter = rxa_id_to_adapter(id);
2699         if (rx_adapter == NULL)
2700                 return -EINVAL;
2701
2702         dev = &rte_eventdevs[rx_adapter->eventdev_id];
2703         ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
2704                                                 eth_dev_id,
2705                                                 &cap);
2706         if (ret)
2707                 return ret;
2708
2709         if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
2710                 rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
2711                 RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
2712                          (uint16_t)rx_queue_id);
2713                 return -EINVAL;
2714         }
2715
2716         dev_info = &rx_adapter->eth_devices[eth_dev_id];
2717
2718         if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
2719                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_del,
2720                                  -ENOTSUP);
2721                 ret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev,
2722                                                 &rte_eth_devices[eth_dev_id],
2723                                                 rx_queue_id);
2724                 if (ret == 0) {
2725                         rxa_update_queue(rx_adapter,
2726                                         &rx_adapter->eth_devices[eth_dev_id],
2727                                         rx_queue_id,
2728                                         0);
2729                         if (dev_info->nb_dev_queues == 0) {
2730                                 rte_free(dev_info->rx_queue);
2731                                 dev_info->rx_queue = NULL;
2732                         }
2733                 }
2734         } else {
2735                 rxa_calc_nb_post_del(rx_adapter, dev_info, rx_queue_id,
2736                         &nb_rx_poll, &nb_rx_intr, &nb_wrr);
2737
2738                 ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
2739                         &rx_poll, &rx_wrr);
2740                 if (ret)
2741                         return ret;
2742
2743                 rte_spinlock_lock(&rx_adapter->rx_lock);
2744
2745                 num_intr_vec = 0;
2746                 if (rx_adapter->num_rx_intr > nb_rx_intr) {
2747
2748                         num_intr_vec = rxa_nb_intr_vect(dev_info,
2749                                                 rx_queue_id, 0);
2750                         ret = rxa_del_intr_queue(rx_adapter, dev_info,
2751                                         rx_queue_id);
2752                         if (ret)
2753                                 goto unlock_ret;
2754                 }
2755
2756                 if (nb_rx_intr == 0) {
2757                         ret = rxa_free_intr_resources(rx_adapter);
2758                         if (ret)
2759                                 goto unlock_ret;
2760                 }
2761
2762                 rxa_sw_del(rx_adapter, dev_info, rx_queue_id);
2763                 rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
2764
2765                 rte_free(rx_adapter->eth_rx_poll);
2766                 rte_free(rx_adapter->wrr_sched);
2767
2768                 if (nb_rx_intr == 0) {
2769                         rte_free(dev_info->intr_queue);
2770                         dev_info->intr_queue = NULL;
2771                 }
2772
2773                 rx_adapter->eth_rx_poll = rx_poll;
2774                 rx_adapter->wrr_sched = rx_wrr;
2775                 rx_adapter->wrr_len = nb_wrr;
2776                 rx_adapter->num_intr_vec += num_intr_vec;
2777
2778                 if (dev_info->nb_dev_queues == 0) {
2779                         rte_free(dev_info->rx_queue);
2780                         dev_info->rx_queue = NULL;
2781                 }
2782 unlock_ret:
2783                 rte_spinlock_unlock(&rx_adapter->rx_lock);
2784                 if (ret) {
2785                         rte_free(rx_poll);
2786                         rte_free(rx_wrr);
2787                         return ret;
2788                 }
2789
2790                 rte_service_component_runstate_set(rx_adapter->service_id,
2791                                 rxa_sw_adapter_queue_count(rx_adapter));
2792         }
2793
2794         rte_eventdev_trace_eth_rx_adapter_queue_del(id, eth_dev_id,
2795                 rx_queue_id, ret);
2796         return ret;
2797 }
2798
2799 int
2800 rte_event_eth_rx_adapter_vector_limits_get(
2801         uint8_t dev_id, uint16_t eth_port_id,
2802         struct rte_event_eth_rx_adapter_vector_limits *limits)
2803 {
2804         struct rte_eventdev *dev;
2805         uint32_t cap;
2806         int ret;
2807
2808         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
2809         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
2810
2811         if (limits == NULL)
2812                 return -EINVAL;
2813
2814         dev = &rte_eventdevs[dev_id];
2815
2816         ret = rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &cap);
2817         if (ret) {
2818                 RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
2819                                  " eth port %" PRIu16,
2820                                  dev_id, eth_port_id);
2821                 return ret;
2822         }
2823
2824         if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
2825                 RTE_FUNC_PTR_OR_ERR_RET(
2826                         *dev->dev_ops->eth_rx_adapter_vector_limits_get,
2827                         -ENOTSUP);
2828                 ret = dev->dev_ops->eth_rx_adapter_vector_limits_get(
2829                         dev, &rte_eth_devices[eth_port_id], limits);
2830         } else {
2831                 ret = rxa_sw_vector_limits(limits);
2832         }
2833
2834         return ret;
2835 }
2836
2837 int
2838 rte_event_eth_rx_adapter_start(uint8_t id)
2839 {
2840         rte_eventdev_trace_eth_rx_adapter_start(id);
2841         return rxa_ctrl(id, 1);
2842 }
2843
2844 int
2845 rte_event_eth_rx_adapter_stop(uint8_t id)
2846 {
2847         rte_eventdev_trace_eth_rx_adapter_stop(id);
2848         return rxa_ctrl(id, 0);
2849 }
2850
2851 int
2852 rte_event_eth_rx_adapter_stats_get(uint8_t id,
2853                                struct rte_event_eth_rx_adapter_stats *stats)
2854 {
2855         struct rte_event_eth_rx_adapter *rx_adapter;
2856         struct rte_eth_event_enqueue_buffer *buf;
2857         struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
2858         struct rte_event_eth_rx_adapter_stats dev_stats;
2859         struct rte_eventdev *dev;
2860         struct eth_device_info *dev_info;
2861         uint32_t i;
2862         int ret;
2863
2864         if (rxa_memzone_lookup())
2865                 return -ENOMEM;
2866
2867         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2868
2869         rx_adapter = rxa_id_to_adapter(id);
        if (rx_adapter == NULL || stats == NULL)
2871                 return -EINVAL;
2872
2873         dev = &rte_eventdevs[rx_adapter->eventdev_id];
2874         memset(stats, 0, sizeof(*stats));
2875         RTE_ETH_FOREACH_DEV(i) {
2876                 dev_info = &rx_adapter->eth_devices[i];
2877                 if (dev_info->internal_event_port == 0 ||
2878                         dev->dev_ops->eth_rx_adapter_stats_get == NULL)
2879                         continue;
2880                 ret = (*dev->dev_ops->eth_rx_adapter_stats_get)(dev,
2881                                                 &rte_eth_devices[i],
2882                                                 &dev_stats);
2883                 if (ret)
2884                         continue;
2885                 dev_stats_sum.rx_packets += dev_stats.rx_packets;
2886                 dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
2887         }
2888
2889         if (rx_adapter->service_inited)
2890                 *stats = rx_adapter->stats;
2891
2892         stats->rx_packets += dev_stats_sum.rx_packets;
2893         stats->rx_enq_count += dev_stats_sum.rx_enq_count;
2894
2895         if (!rx_adapter->use_queue_event_buf) {
2896                 buf = &rx_adapter->event_enqueue_buffer;
2897                 stats->rx_event_buf_count = buf->count;
2898                 stats->rx_event_buf_size = buf->events_size;
2899         } else {
2900                 stats->rx_event_buf_count = 0;
2901                 stats->rx_event_buf_size = 0;
2902         }
2903
2904         return 0;
2905 }
2906
2907 int
2908 rte_event_eth_rx_adapter_stats_reset(uint8_t id)
2909 {
2910         struct rte_event_eth_rx_adapter *rx_adapter;
2911         struct rte_eventdev *dev;
2912         struct eth_device_info *dev_info;
2913         uint32_t i;
2914
2915         if (rxa_memzone_lookup())
2916                 return -ENOMEM;
2917
2918         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2919
2920         rx_adapter = rxa_id_to_adapter(id);
2921         if (rx_adapter == NULL)
2922                 return -EINVAL;
2923
2924         dev = &rte_eventdevs[rx_adapter->eventdev_id];
2925         RTE_ETH_FOREACH_DEV(i) {
2926                 dev_info = &rx_adapter->eth_devices[i];
2927                 if (dev_info->internal_event_port == 0 ||
2928                         dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
2929                         continue;
2930                 (*dev->dev_ops->eth_rx_adapter_stats_reset)(dev,
2931                                                         &rte_eth_devices[i]);
2932         }
2933
2934         memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
2935         return 0;
2936 }
2937
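/* When the ethdev/eventdev pair lacks an internal event port, the transfer
 * runs as a DPDK service. A minimal sketch of handing that service to a
 * service lcore (id and lcore_id are assumed to be set up by the caller,
 * with lcore_id previously added via rte_service_lcore_add()):
 *
 *      uint32_t service_id;
 *
 *      if (rte_event_eth_rx_adapter_service_id_get(id, &service_id) == 0) {
 *              rte_service_map_lcore_set(service_id, lcore_id, 1);
 *              rte_service_runstate_set(service_id, 1);
 *      }
 */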
2938 int
2939 rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
2940 {
2941         struct rte_event_eth_rx_adapter *rx_adapter;
2942
2943         if (rxa_memzone_lookup())
2944                 return -ENOMEM;
2945
2946         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2947
2948         rx_adapter = rxa_id_to_adapter(id);
2949         if (rx_adapter == NULL || service_id == NULL)
2950                 return -EINVAL;
2951
2952         if (rx_adapter->service_inited)
2953                 *service_id = rx_adapter->service_id;
2954
2955         return rx_adapter->service_inited ? 0 : -ESRCH;
2956 }
2957
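/* The callback registered below is invoked by the software service after
 * packets are received and before events are enqueued, letting the
 * application drop or rewrite events; it is therefore rejected for ports
 * that are handled entirely by an internal event port.
 */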
2958 int
2959 rte_event_eth_rx_adapter_cb_register(uint8_t id,
2960                                         uint16_t eth_dev_id,
2961                                         rte_event_eth_rx_adapter_cb_fn cb_fn,
2962                                         void *cb_arg)
2963 {
2964         struct rte_event_eth_rx_adapter *rx_adapter;
2965         struct eth_device_info *dev_info;
2966         uint32_t cap;
2967         int ret;
2968
2969         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2970         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
2971
2972         rx_adapter = rxa_id_to_adapter(id);
2973         if (rx_adapter == NULL)
2974                 return -EINVAL;
2975
2976         dev_info = &rx_adapter->eth_devices[eth_dev_id];
2977         if (dev_info->rx_queue == NULL)
2978                 return -EINVAL;
2979
2980         ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
2981                                                 eth_dev_id,
2982                                                 &cap);
2983         if (ret) {
                RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
                        " eth port %" PRIu16, id, eth_dev_id);
2986                 return ret;
2987         }
2988
2989         if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
2990                 RTE_EDEV_LOG_ERR("Rx callback not supported for eth port %"
2991                                 PRIu16, eth_dev_id);
2992                 return -EINVAL;
2993         }
2994
2995         rte_spinlock_lock(&rx_adapter->rx_lock);
2996         dev_info->cb_fn = cb_fn;
2997         dev_info->cb_arg = cb_arg;
2998         rte_spinlock_unlock(&rx_adapter->rx_lock);
2999
3000         return 0;
3001 }
3002
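/* Reads back the event and servicing weight currently programmed for a
 * queue; when the PMD implements eth_rx_adapter_queue_conf_get, the
 * driver's view overrides the software copy filled in first.
 */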
3003 int
3004 rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
3005                         uint16_t eth_dev_id,
3006                         uint16_t rx_queue_id,
3007                         struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
3008 {
3009         struct rte_eventdev *dev;
3010         struct rte_event_eth_rx_adapter *rx_adapter;
3011         struct eth_device_info *dev_info;
3012         struct eth_rx_queue_info *queue_info;
3013         struct rte_event *qi_ev;
3014         int ret;
3015
3016         if (rxa_memzone_lookup())
3017                 return -ENOMEM;
3018
3019         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3020         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
3021
3022         if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3023                 RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
3024                 return -EINVAL;
3025         }
3026
3027         if (queue_conf == NULL) {
3028                 RTE_EDEV_LOG_ERR("Rx queue conf struct cannot be NULL");
3029                 return -EINVAL;
3030         }
3031
3032         rx_adapter = rxa_id_to_adapter(id);
3033         if (rx_adapter == NULL)
3034                 return -EINVAL;
3035
3036         dev_info = &rx_adapter->eth_devices[eth_dev_id];
3037         if (dev_info->rx_queue == NULL ||
3038             !dev_info->rx_queue[rx_queue_id].queue_enabled) {
3039                 RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
3040                 return -EINVAL;
3041         }
3042
3043         queue_info = &dev_info->rx_queue[rx_queue_id];
3044         qi_ev = (struct rte_event *)&queue_info->event;
3045
3046         memset(queue_conf, 0, sizeof(*queue_conf));
3047         queue_conf->rx_queue_flags = 0;
3048         if (queue_info->flow_id_mask != 0)
3049                 queue_conf->rx_queue_flags |=
3050                         RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
3051         queue_conf->servicing_weight = queue_info->wt;
3052
3053         memcpy(&queue_conf->ev, qi_ev, sizeof(*qi_ev));
3054
3055         dev = &rte_eventdevs[rx_adapter->eventdev_id];
3056         if (dev->dev_ops->eth_rx_adapter_queue_conf_get != NULL) {
3057                 ret = (*dev->dev_ops->eth_rx_adapter_queue_conf_get)(dev,
3058                                                 &rte_eth_devices[eth_dev_id],
3059                                                 rx_queue_id,
3060                                                 queue_conf);
3061                 return ret;
3062         }
3063
3064         return 0;
3065 }
3066
3067 #define RXA_ADD_DICT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
3068
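/* Handlers for the telemetry commands registered in rxa_init_telemetry()
 * at the end of this file. Example client invocations (the adapter, port
 * and queue numbers are illustrative):
 *
 *      --> /eventdev/rxa_stats,0
 *      --> /eventdev/rxa_stats_reset,0
 *      --> /eventdev/rxa_queue_conf,0,1,0
 */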
3069 static int
3070 handle_rxa_stats(const char *cmd __rte_unused,
3071                  const char *params,
3072                  struct rte_tel_data *d)
3073 {
3074         uint8_t rx_adapter_id;
3075         struct rte_event_eth_rx_adapter_stats rx_adptr_stats;
3076
3077         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3078                 return -1;
3079
3080         /* Get Rx adapter ID from parameter string */
3081         rx_adapter_id = atoi(params);
3082         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
3083
3084         /* Get Rx adapter stats */
3085         if (rte_event_eth_rx_adapter_stats_get(rx_adapter_id,
3086                                                &rx_adptr_stats)) {
                RTE_EDEV_LOG_ERR("Failed to get Rx adapter stats");
3088                 return -1;
3089         }
3090
3091         rte_tel_data_start_dict(d);
3092         rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
3093         RXA_ADD_DICT(rx_adptr_stats, rx_packets);
3094         RXA_ADD_DICT(rx_adptr_stats, rx_poll_count);
3095         RXA_ADD_DICT(rx_adptr_stats, rx_dropped);
3096         RXA_ADD_DICT(rx_adptr_stats, rx_enq_retry);
3097         RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_count);
3098         RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_size);
3099         RXA_ADD_DICT(rx_adptr_stats, rx_enq_count);
3100         RXA_ADD_DICT(rx_adptr_stats, rx_enq_start_ts);
3101         RXA_ADD_DICT(rx_adptr_stats, rx_enq_block_cycles);
3102         RXA_ADD_DICT(rx_adptr_stats, rx_enq_end_ts);
3103         RXA_ADD_DICT(rx_adptr_stats, rx_intr_packets);
3104
3105         return 0;
3106 }
3107
3108 static int
3109 handle_rxa_stats_reset(const char *cmd __rte_unused,
3110                        const char *params,
3111                        struct rte_tel_data *d __rte_unused)
3112 {
3113         uint8_t rx_adapter_id;
3114
        if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3116                 return -1;
3117
3118         /* Get Rx adapter ID from parameter string */
3119         rx_adapter_id = atoi(params);
3120         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
3121
3122         /* Reset Rx adapter stats */
3123         if (rte_event_eth_rx_adapter_stats_reset(rx_adapter_id)) {
                RTE_EDEV_LOG_ERR("Failed to reset Rx adapter stats");
3125                 return -1;
3126         }
3127
3128         return 0;
3129 }
3130
3131 static int
3132 handle_rxa_get_queue_conf(const char *cmd __rte_unused,
3133                           const char *params,
3134                           struct rte_tel_data *d)
3135 {
3136         uint8_t rx_adapter_id;
3137         uint16_t rx_queue_id;
3138         int eth_dev_id;
3139         char *token, *l_params;
3140         struct rte_event_eth_rx_adapter_queue_conf queue_conf;
3141
3142         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3143                 return -1;
3144
        /* Get Rx adapter ID from parameter string */
        l_params = strdup(params);
        if (l_params == NULL)
                return -ENOMEM;
        token = strtok(l_params, ",");
        rx_adapter_id = strtoul(token, NULL, 10);

        token = strtok(NULL, ",");
        if (token == NULL || strlen(token) == 0 || !isdigit(*token)) {
                free(l_params);
                return -1;
        }

        /* Get device ID from parameter string */
        eth_dev_id = strtoul(token, NULL, 10);

        token = strtok(NULL, ",");
        if (token == NULL || strlen(token) == 0 || !isdigit(*token)) {
                free(l_params);
                return -1;
        }

        /* Get Rx queue ID from parameter string */
        rx_queue_id = strtoul(token, NULL, 10);

        token = strtok(NULL, "\0");
        if (token != NULL)
                RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
                                 " telemetry command, ignoring");

        /* All tokens parsed, free the duplicated parameter string before the
         * validation macros below can return on error.
         */
        free(l_params);

        RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
        RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

        if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
                RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
                return -EINVAL;
        }
3174
3175         if (rte_event_eth_rx_adapter_queue_conf_get(rx_adapter_id, eth_dev_id,
3176                                                     rx_queue_id, &queue_conf)) {
3177                 RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue config");
3178                 return -1;
3179         }
3180
3181         rte_tel_data_start_dict(d);
3182         rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
3183         rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id);
3184         rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id);
3185         RXA_ADD_DICT(queue_conf, rx_queue_flags);
3186         RXA_ADD_DICT(queue_conf, servicing_weight);
3187         RXA_ADD_DICT(queue_conf.ev, queue_id);
3188         RXA_ADD_DICT(queue_conf.ev, sched_type);
3189         RXA_ADD_DICT(queue_conf.ev, priority);
3190         RXA_ADD_DICT(queue_conf.ev, flow_id);
3191
3192         return 0;
3193 }
3194
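/* RTE_INIT runs this registration from a constructor, so the telemetry
 * commands are available as soon as the library is loaded, independent of
 * whether an Rx adapter instance has been created yet.
 */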
3195 RTE_INIT(rxa_init_telemetry)
3196 {
3197         rte_telemetry_register_cmd("/eventdev/rxa_stats",
3198                 handle_rxa_stats,
3199                 "Returns Rx adapter stats. Parameter: rxa_id");
3200
3201         rte_telemetry_register_cmd("/eventdev/rxa_stats_reset",
3202                 handle_rxa_stats_reset,
3203                 "Reset Rx adapter stats. Parameter: rxa_id");
3204
3205         rte_telemetry_register_cmd("/eventdev/rxa_queue_conf",
3206                 handle_rxa_get_queue_conf,
3207                 "Returns Rx queue config. Parameter: rxa_id, dev_id, queue_id");
3208 }