dpdk.git: lib/eventdev/rte_event_eth_rx_adapter.c (commit 3adec52eac1042f9ccd0aa720acc7dcefc899481)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation.
3  * All rights reserved.
4  */
5 #if defined(LINUX)
6 #include <sys/epoll.h>
7 #endif
8 #include <unistd.h>
9
10 #include <rte_cycles.h>
11 #include <rte_common.h>
12 #include <rte_dev.h>
13 #include <rte_errno.h>
14 #include <ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_service_component.h>
18 #include <rte_thash.h>
19 #include <rte_interrupts.h>
20 #include <rte_mbuf_dyn.h>
21 #include <rte_telemetry.h>
22
23 #include "rte_eventdev.h"
24 #include "eventdev_pmd.h"
25 #include "eventdev_trace.h"
26 #include "rte_event_eth_rx_adapter.h"
27
28 #define BATCH_SIZE              32
29 #define BLOCK_CNT_THRESHOLD     10
30 #define ETH_EVENT_BUFFER_SIZE   (6*BATCH_SIZE)
31 #define MAX_VECTOR_SIZE         1024
32 #define MIN_VECTOR_SIZE         4
33 #define MAX_VECTOR_NS           1E9
34 #define MIN_VECTOR_NS           1E5
35
36 #define ETH_RX_ADAPTER_SERVICE_NAME_LEN 32
37 #define ETH_RX_ADAPTER_MEM_NAME_LEN     32
38
39 #define RSS_KEY_SIZE    40
40 /* value written to intr thread pipe to signal thread exit */
41 #define ETH_BRIDGE_INTR_THREAD_EXIT     1
42 /* Sentinel value to detect an uninitialized file handle */
43 #define INIT_FD         -1
44
45 #define RXA_ADAPTER_ARRAY "rte_event_eth_rx_adapter_array"
46
47 /*
48  * Used to store port and queue ID of interrupting Rx queue
49  */
50 union queue_data {
51         RTE_STD_C11
52         void *ptr;
53         struct {
54                 uint16_t port;
55                 uint16_t queue;
56         };
57 };
58
59 /*
60  * There is an instance of this struct per polled Rx queue added to the
61  * adapter
62  */
63 struct eth_rx_poll_entry {
64         /* Eth port to poll */
65         uint16_t eth_dev_id;
66         /* Eth rx queue to poll */
67         uint16_t eth_rx_qid;
68 };
69
70 struct eth_rx_vector_data {
71         TAILQ_ENTRY(eth_rx_vector_data) next;
72         uint16_t port;
73         uint16_t queue;
74         uint16_t max_vector_count;
75         uint64_t event;
76         uint64_t ts;
77         uint64_t vector_timeout_ticks;
78         struct rte_mempool *vector_pool;
79         struct rte_event_vector *vector_ev;
80 } __rte_cache_aligned;
81
82 TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
83
84 /* Instance per adapter, or per Rx queue when per-queue event buffers are used */
85 struct eth_event_enqueue_buffer {
86         /* Count of events in this buffer */
87         uint16_t count;
88         /* Array of events in this buffer */
89         struct rte_event *events;
90         /* size of event buffer */
91         uint16_t events_size;
92         /* Event enqueue happens from head */
93         uint16_t head;
94         /* New packets from rte_eth_rx_burst are enqueued at the tail */
95         uint16_t tail;
96         /* last element in the buffer before rollover */
97         uint16_t last;
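        /* ~0 when the buffer has wrapped around (i.e. last is valid), otherwise 0 */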
98         uint16_t last_mask;
99 };
100
101 struct event_eth_rx_adapter {
102         /* RSS key */
103         uint8_t rss_key_be[RSS_KEY_SIZE];
104         /* Event device identifier */
105         uint8_t eventdev_id;
106         /* Event port identifier */
107         uint8_t event_port_id;
108         /* Flag indicating per rxq event buffer */
109         bool use_queue_event_buf;
110         /* Per ethernet device structure */
111         struct eth_device_info *eth_devices;
112         /* Lock to serialize config updates with service function */
113         rte_spinlock_t rx_lock;
114         /* Max mbufs processed in any service function invocation */
115         uint32_t max_nb_rx;
116         /* Receive queues that need to be polled */
117         struct eth_rx_poll_entry *eth_rx_poll;
118         /* Size of the eth_rx_poll array */
119         uint16_t num_rx_polled;
120         /* Weighted round robin schedule */
121         uint32_t *wrr_sched;
122         /* wrr_sched[] size */
123         uint32_t wrr_len;
124         /* Next entry in wrr[] to begin polling */
125         uint32_t wrr_pos;
126         /* Event burst buffer */
127         struct eth_event_enqueue_buffer event_enqueue_buffer;
128         /* Vector enable flag */
129         uint8_t ena_vector;
130         /* Timestamp of previous vector expiry list traversal */
131         uint64_t prev_expiry_ts;
132         /* Minimum ticks to wait before traversing expiry list */
133         uint64_t vector_tmo_ticks;
134         /* vector list */
135         struct eth_rx_vector_data_list vector_list;
136         /* Per adapter stats */
137         struct rte_event_eth_rx_adapter_stats stats;
138         /* Block count, counts up to BLOCK_CNT_THRESHOLD */
139         uint16_t enq_block_count;
140         /* Block start ts */
141         uint64_t rx_enq_block_start_ts;
142         /* epoll fd used to wait for Rx interrupts */
143         int epd;
144         /* Number of interrupt driven Rx queues */
145         uint32_t num_rx_intr;
146         /* Used to send <dev id, queue id> of interrupting Rx queues from
147          * the interrupt thread to the Rx thread
148          */
149         struct rte_ring *intr_ring;
150         /* Rx Queue data (dev id, queue id) for the last non-empty
151          * queue polled
152          */
153         union queue_data qd;
154         /* queue_data is valid */
155         int qd_valid;
156         /* Interrupt ring lock, synchronizes Rx thread
157          * and interrupt thread
158          */
159         rte_spinlock_t intr_ring_lock;
160         /* Event array passed to rte_epoll_wait */
161         struct rte_epoll_event *epoll_events;
162         /* Count of interrupt vectors in use */
163         uint32_t num_intr_vec;
164         /* Thread blocked on Rx interrupts */
165         pthread_t rx_intr_thread;
166         /* Configuration callback for rte_service configuration */
167         rte_event_eth_rx_adapter_conf_cb conf_cb;
168         /* Configuration callback argument */
169         void *conf_arg;
170         /* Set if the default conf callback is being used */
171         int default_cb_arg;
172         /* Service initialization state */
173         uint8_t service_inited;
174         /* Total count of Rx queues in adapter */
175         uint32_t nb_queues;
176         /* Memory allocation name */
177         char mem_name[ETH_RX_ADAPTER_MEM_NAME_LEN];
178         /* Socket identifier cached from eventdev */
179         int socket_id;
180         /* Per adapter EAL service */
181         uint32_t service_id;
182         /* Adapter started flag */
183         uint8_t rxa_started;
184         /* Adapter ID */
185         uint8_t id;
186 } __rte_cache_aligned;
187
188 /* Per eth device */
189 struct eth_device_info {
190         struct rte_eth_dev *dev;
191         struct eth_rx_queue_info *rx_queue;
192         /* Rx callback */
193         rte_event_eth_rx_adapter_cb_fn cb_fn;
194         /* Rx callback argument */
195         void *cb_arg;
196         /* Set if ethdev->eventdev packet transfer uses a
197          * hardware mechanism
198          */
199         uint8_t internal_event_port;
200         /* Set if the adapter is processing rx queues for
201          * this eth device and packet processing has been
202          * started, allows for the code to know if the PMD
203          * rx_adapter_stop callback needs to be invoked
204          */
205         uint8_t dev_rx_started;
206         /* Number of queues added for this device */
207         uint16_t nb_dev_queues;
208         /* Number of poll based queues
209          * If nb_rx_poll > 0, the start callback will
210          * be invoked if not already invoked
211          */
212         uint16_t nb_rx_poll;
213         /* Number of interrupt based queues
214          * If nb_rx_intr > 0, the start callback will
215          * be invoked if not already invoked.
216          */
217         uint16_t nb_rx_intr;
218         /* Number of queues that use the shared interrupt */
219         uint16_t nb_shared_intr;
220         /* sum(wrr(q)) for all queues within the device
221          * useful when deleting all device queues
222          */
223         uint32_t wrr_len;
224         /* Intr based queue index to start polling from, this is used
225          * if the number of shared interrupts is non-zero
226          */
227         uint16_t next_q_idx;
228         /* Intr based queue indices */
229         uint16_t *intr_queue;
230         /* Device generates per Rx queue interrupts for queue
231          * indices < RTE_MAX_RXTX_INTR_VEC_ID - 1
232          */
233         int multi_intr_cap;
234         /* shared interrupt enabled */
235         int shared_intr_enabled;
236 };
237
238 /* Per Rx queue */
239 struct eth_rx_queue_info {
240         int queue_enabled;      /* True if added */
241         int intr_enabled;
242         uint8_t ena_vector;
243         uint16_t wt;            /* Polling weight */
244         uint32_t flow_id_mask;  /* Set to ~0 if app provides flow id else 0 */
245         uint64_t event;
246         struct eth_rx_vector_data vector_data;
247         struct eth_event_enqueue_buffer *event_buf;
248         /* Use the adapter stats struct for queue level stats, as the
249          * same stats need to be updated for both adapter and queue
250          */
251         struct rte_event_eth_rx_adapter_stats *stats;
252 };
253
254 static struct event_eth_rx_adapter **event_eth_rx_adapter;
255
256 /* Enable dynamic timestamp field in mbuf */
257 static uint64_t event_eth_rx_timestamp_dynflag;
258 static int event_eth_rx_timestamp_dynfield_offset = -1;
259
260 static inline rte_mbuf_timestamp_t *
261 rxa_timestamp_dynfield(struct rte_mbuf *mbuf)
262 {
263         return RTE_MBUF_DYNFIELD(mbuf,
264                 event_eth_rx_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
265 }
266
267 static inline int
268 rxa_validate_id(uint8_t id)
269 {
270         return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
271 }
272
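/* Return the event enqueue buffer and stats to use for an Rx queue:
 * the per queue buffer and stats when per Rx queue event buffers are
 * enabled, otherwise the per adapter ones.
 */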
273 static inline struct eth_event_enqueue_buffer *
274 rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
275                   uint16_t rx_queue_id,
276                   struct rte_event_eth_rx_adapter_stats **stats)
277 {
278         if (rx_adapter->use_queue_event_buf) {
279                 struct eth_device_info *dev_info =
280                         &rx_adapter->eth_devices[eth_dev_id];
281                 *stats = dev_info->rx_queue[rx_queue_id].stats;
282                 return dev_info->rx_queue[rx_queue_id].event_buf;
283         } else {
284                 *stats = &rx_adapter->stats;
285                 return &rx_adapter->event_enqueue_buffer;
286         }
287 }
288
289 #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
290         if (!rxa_validate_id(id)) { \
291                 RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
292                 return retval; \
293         } \
294 } while (0)
295
296 static inline int
297 rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
298 {
299         return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
300 }
301
302 /* Greatest common divisor */
303 static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
304 {
305         uint16_t r = a % b;
306
307         return r ? rxa_gcd_u16(b, r) : b;
308 }
309
310 /* Returns the next queue in the polling sequence
311  *
312  * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
313  */
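/* Illustrative example: with three polled queues of weights 4, 3 and 2,
 * max_wt = 4 and gcd = 1, cw steps through 4, 3, 2, 1; a queue is picked
 * whenever its weight >= cw, so one full cycle visits the queues 4, 3 and
 * 2 times respectively (9 entries, the sum of the weights).
 */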
314 static int
315 rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw,
316              struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
317              uint16_t gcd, int prev)
318 {
319         int i = prev;
320         uint16_t w;
321
322         while (1) {
323                 uint16_t q;
324                 uint16_t d;
325
326                 i = (i + 1) % n;
327                 if (i == 0) {
328                         *cw = *cw - gcd;
329                         if (*cw <= 0)
330                                 *cw = max_wt;
331                 }
332
333                 q = eth_rx_poll[i].eth_rx_qid;
334                 d = eth_rx_poll[i].eth_dev_id;
335                 w = rx_adapter->eth_devices[d].rx_queue[q].wt;
336
337                 if ((int)w >= *cw)
338                         return i;
339         }
340 }
341
342 static inline int
343 rxa_shared_intr(struct eth_device_info *dev_info,
344         int rx_queue_id)
345 {
346         int multi_intr_cap;
347
348         if (dev_info->dev->intr_handle == NULL)
349                 return 0;
350
351         multi_intr_cap = rte_intr_cap_multiple(dev_info->dev->intr_handle);
352         return !multi_intr_cap ||
353                 rx_queue_id >= RTE_MAX_RXTX_INTR_VEC_ID - 1;
354 }
355
356 static inline int
357 rxa_intr_queue(struct eth_device_info *dev_info,
358         int rx_queue_id)
359 {
360         struct eth_rx_queue_info *queue_info;
361
362         queue_info = &dev_info->rx_queue[rx_queue_id];
363         return dev_info->rx_queue &&
364                 !dev_info->internal_event_port &&
365                 queue_info->queue_enabled && queue_info->wt == 0;
366 }
367
368 static inline int
369 rxa_polled_queue(struct eth_device_info *dev_info,
370         int rx_queue_id)
371 {
372         struct eth_rx_queue_info *queue_info;
373
374         queue_info = &dev_info->rx_queue[rx_queue_id];
375         return !dev_info->internal_event_port &&
376                 dev_info->rx_queue &&
377                 queue_info->queue_enabled && queue_info->wt != 0;
378 }
379
380 /* Calculate change in number of interrupt vectors after Rx queue ID is added/deleted */
381 static int
382 rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
383 {
384         uint16_t i;
385         int n, s;
386         uint16_t nbq;
387
388         nbq = dev_info->dev->data->nb_rx_queues;
389         n = 0; /* non shared count */
390         s = 0; /* shared count */
391
392         if (rx_queue_id == -1) {
393                 for (i = 0; i < nbq; i++) {
394                         if (!rxa_shared_intr(dev_info, i))
395                                 n += add ? !rxa_intr_queue(dev_info, i) :
396                                         rxa_intr_queue(dev_info, i);
397                         else
398                                 s += add ? !rxa_intr_queue(dev_info, i) :
399                                         rxa_intr_queue(dev_info, i);
400                 }
401
402                 if (s > 0) {
403                         if ((add && dev_info->nb_shared_intr == 0) ||
404                                 (!add && dev_info->nb_shared_intr))
405                                 n += 1;
406                 }
407         } else {
408                 if (!rxa_shared_intr(dev_info, rx_queue_id))
409                         n = add ? !rxa_intr_queue(dev_info, rx_queue_id) :
410                                 rxa_intr_queue(dev_info, rx_queue_id);
411                 else
412                         n = add ? !dev_info->nb_shared_intr :
413                                 dev_info->nb_shared_intr == 1;
414         }
415
416         return add ? n : -n;
417 }
418
419 /* Calculate nb_rx_intr after deleting interrupt mode rx queues
420  */
421 static void
422 rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter,
423                           struct eth_device_info *dev_info, int rx_queue_id,
424                           uint32_t *nb_rx_intr)
425 {
426         uint32_t intr_diff;
427
428         if (rx_queue_id == -1)
429                 intr_diff = dev_info->nb_rx_intr;
430         else
431                 intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
432
433         *nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
434 }
435
436 /* Calculate nb_rx_* after adding interrupt mode Rx queues; the newly added
437  * interrupt queues could currently be poll mode Rx queues
438  */
439 static void
440 rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter,
441                           struct eth_device_info *dev_info, int rx_queue_id,
442                           uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
443                           uint32_t *nb_wrr)
444 {
445         uint32_t intr_diff;
446         uint32_t poll_diff;
447         uint32_t wrr_len_diff;
448
449         if (rx_queue_id == -1) {
450                 intr_diff = dev_info->dev->data->nb_rx_queues -
451                                                 dev_info->nb_rx_intr;
452                 poll_diff = dev_info->nb_rx_poll;
453                 wrr_len_diff = dev_info->wrr_len;
454         } else {
455                 intr_diff = !rxa_intr_queue(dev_info, rx_queue_id);
456                 poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
457                 wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
458                                         0;
459         }
460
461         *nb_rx_intr = rx_adapter->num_rx_intr + intr_diff;
462         *nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
463         *nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
464 }
465
466 /* Calculate size of the eth_rx_poll and wrr_sched arrays
467  * after deleting poll mode rx queues
468  */
469 static void
470 rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter,
471                           struct eth_device_info *dev_info, int rx_queue_id,
472                           uint32_t *nb_rx_poll, uint32_t *nb_wrr)
473 {
474         uint32_t poll_diff;
475         uint32_t wrr_len_diff;
476
477         if (rx_queue_id == -1) {
478                 poll_diff = dev_info->nb_rx_poll;
479                 wrr_len_diff = dev_info->wrr_len;
480         } else {
481                 poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
482                 wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
483                                         0;
484         }
485
486         *nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
487         *nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
488 }
489
490 /* Calculate nb_rx_* after adding poll mode rx queues
491  */
492 static void
493 rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter,
494                           struct eth_device_info *dev_info, int rx_queue_id,
495                           uint16_t wt, uint32_t *nb_rx_poll,
496                           uint32_t *nb_rx_intr, uint32_t *nb_wrr)
497 {
498         uint32_t intr_diff;
499         uint32_t poll_diff;
500         uint32_t wrr_len_diff;
501
502         if (rx_queue_id == -1) {
503                 intr_diff = dev_info->nb_rx_intr;
504                 poll_diff = dev_info->dev->data->nb_rx_queues -
505                                                 dev_info->nb_rx_poll;
506                 wrr_len_diff = wt*dev_info->dev->data->nb_rx_queues
507                                 - dev_info->wrr_len;
508         } else {
509                 intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
510                 poll_diff = !rxa_polled_queue(dev_info, rx_queue_id);
511                 wrr_len_diff = rxa_polled_queue(dev_info, rx_queue_id) ?
512                                 wt - dev_info->rx_queue[rx_queue_id].wt :
513                                 wt;
514         }
515
516         *nb_rx_poll = rx_adapter->num_rx_polled + poll_diff;
517         *nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
518         *nb_wrr = rx_adapter->wrr_len + wrr_len_diff;
519 }
520
521 /* Calculate nb_rx_* after adding rx_queue_id */
522 static void
523 rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter,
524                      struct eth_device_info *dev_info, int rx_queue_id,
525                      uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
526                      uint32_t *nb_wrr)
527 {
528         if (wt != 0)
529                 rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
530                                         wt, nb_rx_poll, nb_rx_intr, nb_wrr);
531         else
532                 rxa_calc_nb_post_add_intr(rx_adapter, dev_info, rx_queue_id,
533                                         nb_rx_poll, nb_rx_intr, nb_wrr);
534 }
535
536 /* Calculate nb_rx_* after deleting rx_queue_id */
537 static void
538 rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter,
539                      struct eth_device_info *dev_info, int rx_queue_id,
540                      uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
541                      uint32_t *nb_wrr)
542 {
543         rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
544                                 nb_wrr);
545         rxa_calc_nb_post_intr_del(rx_adapter, dev_info, rx_queue_id,
546                                 nb_rx_intr);
547 }
548
549 /*
550  * Allocate the rx_poll array
551  */
552 static struct eth_rx_poll_entry *
553 rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled)
554 {
555         size_t len;
556
557         len  = RTE_ALIGN(num_rx_polled * sizeof(*rx_adapter->eth_rx_poll),
558                                                         RTE_CACHE_LINE_SIZE);
559         return  rte_zmalloc_socket(rx_adapter->mem_name,
560                                 len,
561                                 RTE_CACHE_LINE_SIZE,
562                                 rx_adapter->socket_id);
563 }
564
565 /*
566  * Allocate the WRR array
567  */
568 static uint32_t *
569 rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr)
570 {
571         size_t len;
572
573         len = RTE_ALIGN(nb_wrr * sizeof(*rx_adapter->wrr_sched),
574                         RTE_CACHE_LINE_SIZE);
575         return  rte_zmalloc_socket(rx_adapter->mem_name,
576                                 len,
577                                 RTE_CACHE_LINE_SIZE,
578                                 rx_adapter->socket_id);
579 }
580
581 static int
582 rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll,
583                       uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll,
584                       uint32_t **wrr_sched)
585 {
586
587         if (nb_poll == 0) {
588                 *rx_poll = NULL;
589                 *wrr_sched = NULL;
590                 return 0;
591         }
592
593         *rx_poll = rxa_alloc_poll(rx_adapter, nb_poll);
594         if (*rx_poll == NULL) {
595                 *wrr_sched = NULL;
596                 return -ENOMEM;
597         }
598
599         *wrr_sched = rxa_alloc_wrr(rx_adapter, nb_wrr);
600         if (*wrr_sched == NULL) {
601                 rte_free(*rx_poll);
602                 return -ENOMEM;
603         }
604         return 0;
605 }
606
607 /* Precalculate WRR polling sequence for all queues in rx_adapter */
608 static void
609 rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter,
610                       struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr)
611 {
612         uint16_t d;
613         uint16_t q;
614         unsigned int i;
615         int prev = -1;
616         int cw = -1;
617
618         /* Initialize variables for calculation of wrr schedule */
619         uint16_t max_wrr_pos = 0;
620         unsigned int poll_q = 0;
621         uint16_t max_wt = 0;
622         uint16_t gcd = 0;
623
624         if (rx_poll == NULL)
625                 return;
626
627         /* Generate the array of all queues to poll; the size of
628          * this array is poll_q
629          */
630         RTE_ETH_FOREACH_DEV(d) {
631                 uint16_t nb_rx_queues;
632                 struct eth_device_info *dev_info =
633                                 &rx_adapter->eth_devices[d];
634                 nb_rx_queues = dev_info->dev->data->nb_rx_queues;
635                 if (dev_info->rx_queue == NULL)
636                         continue;
637                 if (dev_info->internal_event_port)
638                         continue;
639                 dev_info->wrr_len = 0;
640                 for (q = 0; q < nb_rx_queues; q++) {
641                         struct eth_rx_queue_info *queue_info =
642                                 &dev_info->rx_queue[q];
643                         uint16_t wt;
644
645                         if (!rxa_polled_queue(dev_info, q))
646                                 continue;
647                         wt = queue_info->wt;
648                         rx_poll[poll_q].eth_dev_id = d;
649                         rx_poll[poll_q].eth_rx_qid = q;
650                         max_wrr_pos += wt;
651                         dev_info->wrr_len += wt;
652                         max_wt = RTE_MAX(max_wt, wt);
653                         gcd = (gcd) ? rxa_gcd_u16(gcd, wt) : wt;
654                         poll_q++;
655                 }
656         }
657
658         /* Generate polling sequence based on weights */
659         prev = -1;
660         cw = -1;
661         for (i = 0; i < max_wrr_pos; i++) {
662                 rx_wrr[i] = rxa_wrr_next(rx_adapter, poll_q, &cw,
663                                      rx_poll, max_wt, gcd, prev);
664                 prev = rx_wrr[i];
665         }
666 }
667
668 static inline void
669 rxa_mtoip(struct rte_mbuf *m, struct rte_ipv4_hdr **ipv4_hdr,
670         struct rte_ipv6_hdr **ipv6_hdr)
671 {
672         struct rte_ether_hdr *eth_hdr =
673                 rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
674         struct rte_vlan_hdr *vlan_hdr;
675
676         *ipv4_hdr = NULL;
677         *ipv6_hdr = NULL;
678
679         switch (eth_hdr->ether_type) {
680         case RTE_BE16(RTE_ETHER_TYPE_IPV4):
681                 *ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
682                 break;
683
684         case RTE_BE16(RTE_ETHER_TYPE_IPV6):
685                 *ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
686                 break;
687
688         case RTE_BE16(RTE_ETHER_TYPE_VLAN):
689                 vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
690                 switch (vlan_hdr->eth_proto) {
691                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
692                         *ipv4_hdr = (struct rte_ipv4_hdr *)(vlan_hdr + 1);
693                         break;
694                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
695                         *ipv6_hdr = (struct rte_ipv6_hdr *)(vlan_hdr + 1);
696                         break;
697                 default:
698                         break;
699                 }
700                 break;
701
702         default:
703                 break;
704         }
705 }
706
707 /* Calculate RSS hash for IPv4/6 */
708 static inline uint32_t
709 rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
710 {
711         uint32_t input_len;
712         void *tuple;
713         struct rte_ipv4_tuple ipv4_tuple;
714         struct rte_ipv6_tuple ipv6_tuple;
715         struct rte_ipv4_hdr *ipv4_hdr;
716         struct rte_ipv6_hdr *ipv6_hdr;
717
718         rxa_mtoip(m, &ipv4_hdr, &ipv6_hdr);
719
720         if (ipv4_hdr) {
721                 ipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);
722                 ipv4_tuple.dst_addr = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
723                 tuple = &ipv4_tuple;
724                 input_len = RTE_THASH_V4_L3_LEN;
725         } else if (ipv6_hdr) {
726                 rte_thash_load_v6_addrs(ipv6_hdr,
727                                         (union rte_thash_tuple *)&ipv6_tuple);
728                 tuple = &ipv6_tuple;
729                 input_len = RTE_THASH_V6_L3_LEN;
730         } else
731                 return 0;
732
733         return rte_softrss_be(tuple, input_len, rss_key_be);
734 }
735
736 static inline int
737 rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter)
738 {
739         return !!rx_adapter->enq_block_count;
740 }
741
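/* Count consecutive invocations where the enqueue made no progress and
 * record the block start timestamp once the count reaches
 * BLOCK_CNT_THRESHOLD.
 */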
742 static inline void
743 rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter)
744 {
745         if (rx_adapter->rx_enq_block_start_ts)
746                 return;
747
748         rx_adapter->enq_block_count++;
749         if (rx_adapter->enq_block_count < BLOCK_CNT_THRESHOLD)
750                 return;
751
752         rx_adapter->rx_enq_block_start_ts = rte_get_tsc_cycles();
753 }
754
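/* Called when the enqueue makes progress again: reset the block count and
 * add the blocked period, if one was started, to the block cycle stats.
 */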
755 static inline void
756 rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
757                      struct rte_event_eth_rx_adapter_stats *stats)
758 {
759         if (unlikely(!stats->rx_enq_start_ts))
760                 stats->rx_enq_start_ts = rte_get_tsc_cycles();
761
762         if (likely(!rxa_enq_blocked(rx_adapter)))
763                 return;
764
765         rx_adapter->enq_block_count = 0;
766         if (rx_adapter->rx_enq_block_start_ts) {
767                 stats->rx_enq_end_ts = rte_get_tsc_cycles();
768                 stats->rx_enq_block_cycles += stats->rx_enq_end_ts -
769                     rx_adapter->rx_enq_block_start_ts;
770                 rx_adapter->rx_enq_block_start_ts = 0;
771         }
772 }
773
774 /* Enqueue buffered events to event device */
775 static inline uint16_t
776 rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter,
777                        struct eth_event_enqueue_buffer *buf,
778                        struct rte_event_eth_rx_adapter_stats *stats)
779 {
780         uint16_t count = buf->last ? buf->last - buf->head : buf->count;
781
782         if (!count)
783                 return 0;
784
785         uint16_t n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
786                                         rx_adapter->event_port_id,
787                                         &buf->events[buf->head],
788                                         count);
789         if (n != count)
790                 stats->rx_enq_retry++;
791
792         buf->head += n;
793
794         if (buf->last && n == count) {
795                 uint16_t n1;
796
797                 n1 = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
798                                         rx_adapter->event_port_id,
799                                         &buf->events[0],
800                                         buf->tail);
801
802                 if (n1 != buf->tail)
803                         stats->rx_enq_retry++;
804
805                 buf->last = 0;
806                 buf->head = n1;
807                 buf->last_mask = 0;
808                 n += n1;
809         }
810
811         n ? rxa_enq_block_end_ts(rx_adapter, stats) :
812                 rxa_enq_block_start_ts(rx_adapter);
813
814         buf->count -= n;
815         stats->rx_enq_count += n;
816
817         return n;
818 }
819
820 static inline void
821 rxa_init_vector(struct event_eth_rx_adapter *rx_adapter,
822                 struct eth_rx_vector_data *vec)
823 {
824         vec->vector_ev->nb_elem = 0;
825         vec->vector_ev->port = vec->port;
826         vec->vector_ev->queue = vec->queue;
827         vec->vector_ev->attr_valid = true;
828         TAILQ_INSERT_TAIL(&rx_adapter->vector_list, vec, next);
829 }
830
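/* Aggregate received mbufs into the queue's event vector; completed vectors
 * are appended as events at buf->events[buf->count] and the number of such
 * vector events is returned.
 */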
831 static inline uint16_t
832 rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
833                         struct eth_rx_queue_info *queue_info,
834                         struct eth_event_enqueue_buffer *buf,
835                         struct rte_mbuf **mbufs, uint16_t num)
836 {
837         struct rte_event *ev = &buf->events[buf->count];
838         struct eth_rx_vector_data *vec;
839         uint16_t filled, space, sz;
840
841         filled = 0;
842         vec = &queue_info->vector_data;
843
844         if (vec->vector_ev == NULL) {
845                 if (rte_mempool_get(vec->vector_pool,
846                                     (void **)&vec->vector_ev) < 0) {
847                         rte_pktmbuf_free_bulk(mbufs, num);
848                         return 0;
849                 }
850                 rxa_init_vector(rx_adapter, vec);
851         }
852         while (num) {
853                 if (vec->vector_ev->nb_elem == vec->max_vector_count) {
854                         /* Event ready. */
855                         ev->event = vec->event;
856                         ev->vec = vec->vector_ev;
857                         ev++;
858                         filled++;
859                         vec->vector_ev = NULL;
860                         TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
861                         if (rte_mempool_get(vec->vector_pool,
862                                             (void **)&vec->vector_ev) < 0) {
863                                 rte_pktmbuf_free_bulk(mbufs, num);
864                                 return 0;
865                         }
866                         rxa_init_vector(rx_adapter, vec);
867                 }
868
869                 space = vec->max_vector_count - vec->vector_ev->nb_elem;
870                 sz = num > space ? space : num;
871                 memcpy(vec->vector_ev->mbufs + vec->vector_ev->nb_elem, mbufs,
872                        sizeof(void *) * sz);
873                 vec->vector_ev->nb_elem += sz;
874                 num -= sz;
875                 mbufs += sz;
876                 vec->ts = rte_rdtsc();
877         }
878
879         if (vec->vector_ev->nb_elem == vec->max_vector_count) {
880                 ev->event = vec->event;
881                 ev->vec = vec->vector_ev;
882                 ev++;
883                 filled++;
884                 vec->vector_ev = NULL;
885                 TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
886         }
887
888         return filled;
889 }
890
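/* Add received mbufs to the event enqueue buffer, either as one event per
 * mbuf (computing a software RSS hash and timestamp when the mbuf doesn't
 * carry them) or aggregated into event vectors, then run the optional per
 * device Rx callback.
 */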
891 static inline void
892 rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
893                  uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num,
894                  struct eth_event_enqueue_buffer *buf,
895                  struct rte_event_eth_rx_adapter_stats *stats)
896 {
897         uint32_t i;
898         struct eth_device_info *dev_info =
899                                         &rx_adapter->eth_devices[eth_dev_id];
900         struct eth_rx_queue_info *eth_rx_queue_info =
901                                         &dev_info->rx_queue[rx_queue_id];
902         uint16_t new_tail = buf->tail;
903         uint64_t event = eth_rx_queue_info->event;
904         uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
905         struct rte_mbuf *m = mbufs[0];
906         uint32_t rss_mask;
907         uint32_t rss;
908         int do_rss;
909         uint16_t nb_cb;
910         uint16_t dropped;
911         uint64_t ts, ts_mask;
912
913         if (!eth_rx_queue_info->ena_vector) {
914                 ts = m->ol_flags & event_eth_rx_timestamp_dynflag ?
915                                                 0 : rte_get_tsc_cycles();
916
917                 /* 0xffff ffff ffff ffff if the Rx timestamp dynflag is
918                  * set, otherwise 0
919                  */
920                 ts_mask = (uint64_t)(!(m->ol_flags &
921                                        event_eth_rx_timestamp_dynflag)) - 1ULL;
922
923                 /* 0xffff ffff if RTE_MBUF_F_RX_RSS_HASH is set, otherwise 0 */
924                 rss_mask = ~(((m->ol_flags & RTE_MBUF_F_RX_RSS_HASH) != 0) - 1);
925                 do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
926                 for (i = 0; i < num; i++) {
927                         struct rte_event *ev;
928
929                         m = mbufs[i];
930                         *rxa_timestamp_dynfield(m) = ts |
931                                         (*rxa_timestamp_dynfield(m) & ts_mask);
932
933                         ev = &buf->events[new_tail];
934
935                         rss = do_rss ? rxa_do_softrss(m, rx_adapter->rss_key_be)
936                                      : m->hash.rss;
937                         ev->event = event;
938                         ev->flow_id = (rss & ~flow_id_mask) |
939                                       (ev->flow_id & flow_id_mask);
940                         ev->mbuf = m;
941                         new_tail++;
942                 }
943         } else {
944                 num = rxa_create_event_vector(rx_adapter, eth_rx_queue_info,
945                                               buf, mbufs, num);
946         }
947
948         if (num && dev_info->cb_fn) {
949
950                 dropped = 0;
951                 nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id,
952                                        buf->last |
953                                        (buf->events_size & ~buf->last_mask),
954                                        buf->count >= BATCH_SIZE ?
955                                                 buf->count - BATCH_SIZE : 0,
956                                        &buf->events[buf->tail],
957                                        num,
958                                        dev_info->cb_arg,
959                                        &dropped);
960                 if (unlikely(nb_cb > num))
961                         RTE_EDEV_LOG_ERR("Rx CB returned %d (> %d) events",
962                                 nb_cb, num);
963                 else
964                         num = nb_cb;
965                 if (dropped)
966                         stats->rx_dropped += dropped;
967         }
968
969         buf->count += num;
970         buf->tail += num;
971 }
972
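/* Return true if the event buffer has room for another burst of BATCH_SIZE
 * events; wrap the circular buffer (recording the wrap point in last and
 * last_mask) when the tail reaches the end and the head has advanced by at
 * least BATCH_SIZE.
 */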
973 static inline bool
974 rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf)
975 {
976         uint32_t nb_req = buf->tail + BATCH_SIZE;
977
978         if (!buf->last) {
979                 if (nb_req <= buf->events_size)
980                         return true;
981
982                 if (buf->head >= BATCH_SIZE) {
983                         buf->last_mask = ~0;
984                         buf->last = buf->tail;
985                         buf->tail = 0;
986                         return true;
987                 }
988         }
989
990         return nb_req <= buf->head;
991 }
992
993 /* Enqueue packets from <port, q> to the event buffer */
994 static inline uint32_t
995 rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
996            uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
997            int *rxq_empty, struct eth_event_enqueue_buffer *buf,
998            struct rte_event_eth_rx_adapter_stats *stats)
999 {
1000         struct rte_mbuf *mbufs[BATCH_SIZE];
1001         uint16_t n;
1002         uint32_t nb_rx = 0;
1003
1004         if (rxq_empty)
1005                 *rxq_empty = 0;
1006         /* Don't do a batch dequeue from the rx queue if there isn't
1007          * enough space in the enqueue buffer.
1008          */
1009         while (rxa_pkt_buf_available(buf)) {
1010                 if (buf->count >= BATCH_SIZE)
1011                         rxa_flush_event_buffer(rx_adapter, buf, stats);
1012
1013                 stats->rx_poll_count++;
1014                 n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
1015                 if (unlikely(!n)) {
1016                         if (rxq_empty)
1017                                 *rxq_empty = 1;
1018                         break;
1019                 }
1020                 rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf,
1021                                  stats);
1022                 nb_rx += n;
1023                 if (rx_count + nb_rx > max_rx)
1024                         break;
1025         }
1026
1027         if (buf->count > 0)
1028                 rxa_flush_event_buffer(rx_adapter, buf, stats);
1029
1030         stats->rx_packets += nb_rx;
1031
1032         return nb_rx;
1033 }
1034
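/* Called from the interrupt thread: post the <port, queue> of an
 * interrupting Rx queue to the adapter's interrupt ring and disable the
 * queue interrupt until the service function re-enables it.
 */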
1035 static inline void
1036 rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data)
1037 {
1038         uint16_t port_id;
1039         uint16_t queue;
1040         int err;
1041         union queue_data qd;
1042         struct eth_device_info *dev_info;
1043         struct eth_rx_queue_info *queue_info;
1044         int *intr_enabled;
1045
1046         qd.ptr = data;
1047         port_id = qd.port;
1048         queue = qd.queue;
1049
1050         dev_info = &rx_adapter->eth_devices[port_id];
1051         queue_info = &dev_info->rx_queue[queue];
1052         rte_spinlock_lock(&rx_adapter->intr_ring_lock);
1053         if (rxa_shared_intr(dev_info, queue))
1054                 intr_enabled = &dev_info->shared_intr_enabled;
1055         else
1056                 intr_enabled = &queue_info->intr_enabled;
1057
1058         if (*intr_enabled) {
1059                 *intr_enabled = 0;
1060                 err = rte_ring_enqueue(rx_adapter->intr_ring, data);
1061                 /* Entry should always be available.
1062                  * The ring size equals the maximum number of interrupt
1063                  * vectors supported (an interrupt vector is shared in
1064                  * case of shared interrupts)
1065                  */
1066                 if (err)
1067                         RTE_EDEV_LOG_ERR("Failed to enqueue interrupt"
1068                                 " to ring: %s", strerror(-err));
1069                 else
1070                         rte_eth_dev_rx_intr_disable(port_id, queue);
1071         }
1072         rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
1073 }
1074
1075 static int
1076 rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter,
1077                           uint32_t num_intr_vec)
1078 {
1079         if (rx_adapter->num_intr_vec + num_intr_vec >
1080                                 RTE_EVENT_ETH_INTR_RING_SIZE) {
1081                 RTE_EDEV_LOG_ERR("Exceeded intr ring slots current"
1082                 " %d needed %d limit %d", rx_adapter->num_intr_vec,
1083                 num_intr_vec, RTE_EVENT_ETH_INTR_RING_SIZE);
1084                 return -ENOSPC;
1085         }
1086
1087         return 0;
1088 }
1089
1090 /* Delete entries for (dev, queue) from the interrupt ring */
1091 static void
1092 rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter,
1093                           struct eth_device_info *dev_info,
1094                           uint16_t rx_queue_id)
1095 {
1096         int i, n;
1097         union queue_data qd;
1098
1099         rte_spinlock_lock(&rx_adapter->intr_ring_lock);
1100
1101         n = rte_ring_count(rx_adapter->intr_ring);
1102         for (i = 0; i < n; i++) {
1103                 rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
1104                 if (!rxa_shared_intr(dev_info, rx_queue_id)) {
1105                         if (qd.port == dev_info->dev->data->port_id &&
1106                                 qd.queue == rx_queue_id)
1107                                 continue;
1108                 } else {
1109                         if (qd.port == dev_info->dev->data->port_id)
1110                                 continue;
1111                 }
1112                 rte_ring_enqueue(rx_adapter->intr_ring, qd.ptr);
1113         }
1114
1115         rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
1116 }
1117
1118 /* pthread callback handling interrupt mode receive queues
1119  * After receiving an Rx interrupt, it enqueues the port id and queue id of the
1120  * interrupting queue to the adapter's ring buffer for interrupt events.
1121  * These events are picked up by rxa_intr_ring_dequeue() which is invoked from
1122  * the adapter service function.
1123  */
1124 static void *
1125 rxa_intr_thread(void *arg)
1126 {
1127         struct event_eth_rx_adapter *rx_adapter = arg;
1128         struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
1129         int n, i;
1130
1131         while (1) {
1132                 n = rte_epoll_wait(rx_adapter->epd, epoll_events,
1133                                 RTE_EVENT_ETH_INTR_RING_SIZE, -1);
1134                 if (unlikely(n < 0))
1135                         RTE_EDEV_LOG_ERR("rte_epoll_wait returned error %d",
1136                                         n);
1137                 for (i = 0; i < n; i++) {
1138                         rxa_intr_ring_enqueue(rx_adapter,
1139                                         epoll_events[i].epdata.data);
1140                 }
1141         }
1142
1143         return NULL;
1144 }
1145
1146 /* Dequeue <port, q> from interrupt ring and enqueue received
1147  * mbufs to eventdev
1148  */
1149 static inline void
1150 rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
1151 {
1152         uint32_t n;
1153         uint32_t nb_rx = 0;
1154         int rxq_empty;
1155         struct eth_event_enqueue_buffer *buf;
1156         struct rte_event_eth_rx_adapter_stats *stats;
1157         rte_spinlock_t *ring_lock;
1158         uint8_t max_done = 0;
1159
1160         if (rx_adapter->num_rx_intr == 0)
1161                 return;
1162
1163         if (rte_ring_count(rx_adapter->intr_ring) == 0
1164                 && !rx_adapter->qd_valid)
1165                 return;
1166
1167         buf = &rx_adapter->event_enqueue_buffer;
1168         stats = &rx_adapter->stats;
1169         ring_lock = &rx_adapter->intr_ring_lock;
1170
1171         if (buf->count >= BATCH_SIZE)
1172                 rxa_flush_event_buffer(rx_adapter, buf, stats);
1173
1174         while (rxa_pkt_buf_available(buf)) {
1175                 struct eth_device_info *dev_info;
1176                 uint16_t port;
1177                 uint16_t queue;
1178                 union queue_data qd  = rx_adapter->qd;
1179                 int err;
1180
1181                 if (!rx_adapter->qd_valid) {
1182                         struct eth_rx_queue_info *queue_info;
1183
1184                         rte_spinlock_lock(ring_lock);
1185                         err = rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
1186                         if (err) {
1187                                 rte_spinlock_unlock(ring_lock);
1188                                 break;
1189                         }
1190
1191                         port = qd.port;
1192                         queue = qd.queue;
1193                         rx_adapter->qd = qd;
1194                         rx_adapter->qd_valid = 1;
1195                         dev_info = &rx_adapter->eth_devices[port];
1196                         if (rxa_shared_intr(dev_info, queue))
1197                                 dev_info->shared_intr_enabled = 1;
1198                         else {
1199                                 queue_info = &dev_info->rx_queue[queue];
1200                                 queue_info->intr_enabled = 1;
1201                         }
1202                         rte_eth_dev_rx_intr_enable(port, queue);
1203                         rte_spinlock_unlock(ring_lock);
1204                 } else {
1205                         port = qd.port;
1206                         queue = qd.queue;
1207
1208                         dev_info = &rx_adapter->eth_devices[port];
1209                 }
1210
1211                 if (rxa_shared_intr(dev_info, queue)) {
1212                         uint16_t i;
1213                         uint16_t nb_queues;
1214
1215                         nb_queues = dev_info->dev->data->nb_rx_queues;
1216                         n = 0;
1217                         for (i = dev_info->next_q_idx; i < nb_queues; i++) {
1218                                 uint8_t enq_buffer_full;
1219
1220                                 if (!rxa_intr_queue(dev_info, i))
1221                                         continue;
1222                                 n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
1223                                         rx_adapter->max_nb_rx,
1224                                         &rxq_empty, buf, stats);
1225                                 nb_rx += n;
1226
1227                                 enq_buffer_full = !rxq_empty && n == 0;
1228                                 max_done = nb_rx > rx_adapter->max_nb_rx;
1229
1230                                 if (enq_buffer_full || max_done) {
1231                                         dev_info->next_q_idx = i;
1232                                         goto done;
1233                                 }
1234                         }
1235
1236                         rx_adapter->qd_valid = 0;
1237
1238                         /* Reinitialize for next interrupt */
1239                         dev_info->next_q_idx = dev_info->multi_intr_cap ?
1240                                                 RTE_MAX_RXTX_INTR_VEC_ID - 1 :
1241                                                 0;
1242                 } else {
1243                         n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
1244                                 rx_adapter->max_nb_rx,
1245                                 &rxq_empty, buf, stats);
1246                         rx_adapter->qd_valid = !rxq_empty;
1247                         nb_rx += n;
1248                         if (nb_rx > rx_adapter->max_nb_rx)
1249                                 break;
1250                 }
1251         }
1252
1253 done:
1254         rx_adapter->stats.rx_intr_packets += nb_rx;
1255 }
1256
1257 /*
1258  * Polls receive queues added to the event adapter and enqueues received
1259  * packets to the event device.
1260  *
1261  * The receive code enqueues initially to a temporary buffer; the
1262  * temporary buffer is drained any time it holds >= BATCH_SIZE packets.
1263  *
1264  * If there isn't space available in the temporary buffer, packets from the
1265  * Rx queue aren't dequeued from the eth device; this back pressures the
1266  * eth device. In virtual device environments this back pressure is relayed
1267  * to the hypervisor's switching layer, where adjustments can be made to
1268  * deal with it.
1269  */
1270 static inline void
1271 rxa_poll(struct event_eth_rx_adapter *rx_adapter)
1272 {
1273         uint32_t num_queue;
1274         uint32_t nb_rx = 0;
1275         struct eth_event_enqueue_buffer *buf = NULL;
1276         struct rte_event_eth_rx_adapter_stats *stats = NULL;
1277         uint32_t wrr_pos;
1278         uint32_t max_nb_rx;
1279
1280         wrr_pos = rx_adapter->wrr_pos;
1281         max_nb_rx = rx_adapter->max_nb_rx;
1282
1283         /* Iterate through a WRR sequence */
1284         for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
1285                 unsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];
1286                 uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
1287                 uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
1288
1289                 buf = rxa_event_buf_get(rx_adapter, d, qid, &stats);
1290
1291                 /* Don't do a batch dequeue from the rx queue if there isn't
1292                  * enough space in the enqueue buffer.
1293                  */
1294                 if (buf->count >= BATCH_SIZE)
1295                         rxa_flush_event_buffer(rx_adapter, buf, stats);
1296                 if (!rxa_pkt_buf_available(buf)) {
1297                         if (rx_adapter->use_queue_event_buf)
1298                                 goto poll_next_entry;
1299                         else {
1300                                 rx_adapter->wrr_pos = wrr_pos;
1301                                 return;
1302                         }
1303                 }
1304
1305                 nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
1306                                 NULL, buf, stats);
1307                 if (nb_rx > max_nb_rx) {
1308                         rx_adapter->wrr_pos =
1309                                     (wrr_pos + 1) % rx_adapter->wrr_len;
1310                         break;
1311                 }
1312
1313 poll_next_entry:
1314                 if (++wrr_pos == rx_adapter->wrr_len)
1315                         wrr_pos = 0;
1316         }
1317 }
1318
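/* Enqueue a partially filled event vector whose timeout has expired */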
1319 static void
1320 rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
1321 {
1322         struct event_eth_rx_adapter *rx_adapter = arg;
1323         struct eth_event_enqueue_buffer *buf = NULL;
1324         struct rte_event_eth_rx_adapter_stats *stats = NULL;
1325         struct rte_event *ev;
1326
1327         buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue, &stats);
1328
1329         if (buf->count)
1330                 rxa_flush_event_buffer(rx_adapter, buf, stats);
1331
1332         if (vec->vector_ev->nb_elem == 0)
1333                 return;
1334         ev = &buf->events[buf->count];
1335
1336         /* Event ready. */
1337         ev->event = vec->event;
1338         ev->vec = vec->vector_ev;
1339         buf->count++;
1340
1341         vec->vector_ev = NULL;
1342         vec->ts = 0;
1343 }
1344
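/* Adapter service function: flush expired event vectors, service interrupt
 * mode Rx queues and poll the WRR sequence of poll mode Rx queues.
 */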
1345 static int
1346 rxa_service_func(void *args)
1347 {
1348         struct event_eth_rx_adapter *rx_adapter = args;
1349
1350         if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
1351                 return 0;
1352         if (!rx_adapter->rxa_started) {
1353                 rte_spinlock_unlock(&rx_adapter->rx_lock);
1354                 return 0;
1355         }
1356
1357         if (rx_adapter->ena_vector) {
1358                 if ((rte_rdtsc() - rx_adapter->prev_expiry_ts) >=
1359                     rx_adapter->vector_tmo_ticks) {
1360                         struct eth_rx_vector_data *vec;
1361
1362                         TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {
1363                                 uint64_t elapsed_time = rte_rdtsc() - vec->ts;
1364
1365                                 if (elapsed_time >= vec->vector_timeout_ticks) {
1366                                         rxa_vector_expire(vec, rx_adapter);
1367                                         TAILQ_REMOVE(&rx_adapter->vector_list,
1368                                                      vec, next);
1369                                 }
1370                         }
1371                         rx_adapter->prev_expiry_ts = rte_rdtsc();
1372                 }
1373         }
1374
1375         rxa_intr_ring_dequeue(rx_adapter);
1376         rxa_poll(rx_adapter);
1377
1378         rte_spinlock_unlock(&rx_adapter->rx_lock);
1379
1380         return 0;
1381 }
1382
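/* Reserve, or look up, the memzone holding the array of adapter instance
 * pointers.
 */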
1383 static int
1384 rte_event_eth_rx_adapter_init(void)
1385 {
1386         const char *name = RXA_ADAPTER_ARRAY;
1387         const struct rte_memzone *mz;
1388         unsigned int sz;
1389
1390         sz = sizeof(*event_eth_rx_adapter) *
1391             RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
1392         sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
1393
1394         mz = rte_memzone_lookup(name);
1395         if (mz == NULL) {
1396                 mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
1397                                                  RTE_CACHE_LINE_SIZE);
1398                 if (mz == NULL) {
1399                         RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
1400                                         PRId32, rte_errno);
1401                         return -rte_errno;
1402                 }
1403         }
1404
1405         event_eth_rx_adapter = mz->addr;
1406         return 0;
1407 }
1408
1409 static int
1410 rxa_memzone_lookup(void)
1411 {
1412         const struct rte_memzone *mz;
1413
1414         if (event_eth_rx_adapter == NULL) {
1415                 mz = rte_memzone_lookup(RXA_ADAPTER_ARRAY);
1416                 if (mz == NULL)
1417                         return -ENOMEM;
1418                 event_eth_rx_adapter = mz->addr;
1419         }
1420
1421         return 0;
1422 }
1423
1424 static inline struct event_eth_rx_adapter *
1425 rxa_id_to_adapter(uint8_t id)
1426 {
1427         return event_eth_rx_adapter ?
1428                 event_eth_rx_adapter[id] : NULL;
1429 }
1430
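/* Default configuration callback: reconfigure the event device with one
 * additional event port for the adapter, restarting the device if it was
 * already started.
 */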
1431 static int
1432 rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
1433                 struct rte_event_eth_rx_adapter_conf *conf, void *arg)
1434 {
1435         int ret;
1436         struct rte_eventdev *dev;
1437         struct rte_event_dev_config dev_conf;
1438         int started;
1439         uint8_t port_id;
1440         struct rte_event_port_conf *port_conf = arg;
1441         struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
1442
1443         dev = &rte_eventdevs[rx_adapter->eventdev_id];
1444         dev_conf = dev->data->dev_conf;
1445
1446         started = dev->data->dev_started;
1447         if (started)
1448                 rte_event_dev_stop(dev_id);
1449         port_id = dev_conf.nb_event_ports;
1450         dev_conf.nb_event_ports += 1;
1451         ret = rte_event_dev_configure(dev_id, &dev_conf);
1452         if (ret) {
1453                 RTE_EDEV_LOG_ERR("failed to configure event dev %u\n",
1454                                                 dev_id);
1455                 if (started) {
1456                         if (rte_event_dev_start(dev_id))
1457                                 return -EIO;
1458                 }
1459                 return ret;
1460         }
1461
1462         ret = rte_event_port_setup(dev_id, port_id, port_conf);
1463         if (ret) {
1464                 RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
1465                                         port_id);
1466                 return ret;
1467         }
1468
1469         conf->event_port_id = port_id;
1470         conf->max_nb_rx = 128;
1471         if (started)
1472                 ret = rte_event_dev_start(dev_id);
1473         rx_adapter->default_cb_arg = 1;
1474         return ret;
1475 }
1476
1477 static int
1478 rxa_epoll_create1(void)
1479 {
1480 #if defined(LINUX)
1481         int fd;
1482         fd = epoll_create1(EPOLL_CLOEXEC);
1483         return fd < 0 ? -errno : fd;
1484 #elif defined(BSD)
1485         return -ENOTSUP;
1486 #endif
1487 }
1488
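/* Lazily create the epoll fd used to wait for Rx queue interrupts */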
1489 static int
1490 rxa_init_epd(struct event_eth_rx_adapter *rx_adapter)
1491 {
1492         if (rx_adapter->epd != INIT_FD)
1493                 return 0;
1494
1495         rx_adapter->epd = rxa_epoll_create1();
1496         if (rx_adapter->epd < 0) {
1497                 int err = rx_adapter->epd;
1498                 rx_adapter->epd = INIT_FD;
1499                 RTE_EDEV_LOG_ERR("epoll_create1() failed, err %d", err);
1500                 return err;
1501         }
1502
1503         return 0;
1504 }
1505
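/*
 * Allocate the interrupt ring and epoll event array and spawn the control
 * thread that translates Rx interrupts into ring entries consumed by the
 * service function.
 */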
1506 static int
1507 rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
1508 {
1509         int err;
1510         char thread_name[RTE_MAX_THREAD_NAME_LEN];
1511
1512         if (rx_adapter->intr_ring)
1513                 return 0;
1514
1515         rx_adapter->intr_ring = rte_ring_create("intr_ring",
1516                                         RTE_EVENT_ETH_INTR_RING_SIZE,
1517                                         rte_socket_id(), 0);
1518         if (!rx_adapter->intr_ring)
1519                 return -ENOMEM;
1520
1521         rx_adapter->epoll_events = rte_zmalloc_socket(rx_adapter->mem_name,
1522                                         RTE_EVENT_ETH_INTR_RING_SIZE *
1523                                         sizeof(struct rte_epoll_event),
1524                                         RTE_CACHE_LINE_SIZE,
1525                                         rx_adapter->socket_id);
1526         if (!rx_adapter->epoll_events) {
1527                 err = -ENOMEM;
1528                 goto error;
1529         }
1530
1531         rte_spinlock_init(&rx_adapter->intr_ring_lock);
1532
1533         snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
1534                         "rx-intr-thread-%d", rx_adapter->id);
1535
1536         err = rte_ctrl_thread_create(&rx_adapter->rx_intr_thread, thread_name,
1537                                 NULL, rxa_intr_thread, rx_adapter);
1538         if (!err)
1539                 return 0;
1540
1541         RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d", err);
1542         rte_free(rx_adapter->epoll_events);
1543 error:
1544         rte_ring_free(rx_adapter->intr_ring);
1545         rx_adapter->intr_ring = NULL;
1546         rx_adapter->epoll_events = NULL;
1547         return err;
1548 }
1549
1550 static int
1551 rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
1552 {
1553         int err;
1554
1555         err = pthread_cancel(rx_adapter->rx_intr_thread);
1556         if (err)
1557                 RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d",
1558                                 err);
1559
1560         err = pthread_join(rx_adapter->rx_intr_thread, NULL);
1561         if (err)
1562                 RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d", err);
1563
1564         rte_free(rx_adapter->epoll_events);
1565         rte_ring_free(rx_adapter->intr_ring);
1566         rx_adapter->intr_ring = NULL;
1567         rx_adapter->epoll_events = NULL;
1568         return 0;
1569 }
1570
1571 static int
1572 rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter)
1573 {
1574         int ret;
1575
1576         if (rx_adapter->num_rx_intr == 0)
1577                 return 0;
1578
1579         ret = rxa_destroy_intr_thread(rx_adapter);
1580         if (ret)
1581                 return ret;
1582
1583         close(rx_adapter->epd);
1584         rx_adapter->epd = INIT_FD;
1585
1586         return ret;
1587 }
1588
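/* Disable the Rx interrupt for a queue and drop its epoll registration */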
1589 static int
1590 rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter,
1591                  struct eth_device_info *dev_info, uint16_t rx_queue_id)
1592 {
1593         int err;
1594         uint16_t eth_dev_id = dev_info->dev->data->port_id;
1595         int sintr = rxa_shared_intr(dev_info, rx_queue_id);
1596
1597         err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
1598         if (err) {
1599                 RTE_EDEV_LOG_ERR("Could not disable interrupt for Rx queue %u",
1600                         rx_queue_id);
1601                 return err;
1602         }
1603
1604         err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
1605                                         rx_adapter->epd,
1606                                         RTE_INTR_EVENT_DEL,
1607                                         0);
1608         if (err)
1609                 RTE_EDEV_LOG_ERR("Interrupt event deletion failed %d", err);
1610
1611         if (sintr)
1612                 dev_info->rx_queue[rx_queue_id].intr_enabled = 0;
1613         else
1614                 dev_info->shared_intr_enabled = 0;
1615         return err;
1616 }
1617
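/*
 * Remove one queue (or all queues when rx_queue_id == -1) from interrupt
 * mode; a shared interrupt is only disabled once its last queue is removed,
 * and the intr_queue[] array is compacted for a single queue delete.
 */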
1618 static int
1619 rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
1620                    struct eth_device_info *dev_info, int rx_queue_id)
1621 {
1622         int err;
1623         int i;
1624         int s;
1625
1626         if (dev_info->nb_rx_intr == 0)
1627                 return 0;
1628
1629         err = 0;
1630         if (rx_queue_id == -1) {
1631                 s = dev_info->nb_shared_intr;
1632                 for (i = 0; i < dev_info->nb_rx_intr; i++) {
1633                         int sintr;
1634                         uint16_t q;
1635
1636                         q = dev_info->intr_queue[i];
1637                         sintr = rxa_shared_intr(dev_info, q);
1638                         s -= sintr;
1639
1640                         if (!sintr || s == 0) {
1641
1642                                 err = rxa_disable_intr(rx_adapter, dev_info,
1643                                                 q);
1644                                 if (err)
1645                                         return err;
1646                                 rxa_intr_ring_del_entries(rx_adapter, dev_info,
1647                                                         q);
1648                         }
1649                 }
1650         } else {
1651                 if (!rxa_intr_queue(dev_info, rx_queue_id))
1652                         return 0;
1653                 if (!rxa_shared_intr(dev_info, rx_queue_id) ||
1654                                 dev_info->nb_shared_intr == 1) {
1655                         err = rxa_disable_intr(rx_adapter, dev_info,
1656                                         rx_queue_id);
1657                         if (err)
1658                                 return err;
1659                         rxa_intr_ring_del_entries(rx_adapter, dev_info,
1660                                                 rx_queue_id);
1661                 }
1662
1663                 for (i = 0; i < dev_info->nb_rx_intr; i++) {
1664                         if (dev_info->intr_queue[i] == rx_queue_id) {
1665                                 for (; i < dev_info->nb_rx_intr - 1; i++)
1666                                         dev_info->intr_queue[i] =
1667                                                 dev_info->intr_queue[i + 1];
1668                                 break;
1669                         }
1670                 }
1671         }
1672
1673         return err;
1674 }
1675
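/*
 * Put a queue into interrupt mode: register it with the adapter's epoll fd,
 * enable the device interrupt and make sure the interrupt thread exists.
 * On failure, each step is rolled back in reverse order.
 */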
1676 static int
1677 rxa_config_intr(struct event_eth_rx_adapter *rx_adapter,
1678                 struct eth_device_info *dev_info, uint16_t rx_queue_id)
1679 {
1680         int err, err1;
1681         uint16_t eth_dev_id = dev_info->dev->data->port_id;
1682         union queue_data qd;
1683         int init_fd;
1684         uint16_t *intr_queue;
1685         int sintr = rxa_shared_intr(dev_info, rx_queue_id);
1686
1687         if (rxa_intr_queue(dev_info, rx_queue_id))
1688                 return 0;
1689
1690         intr_queue = dev_info->intr_queue;
1691         if (dev_info->intr_queue == NULL) {
1692                 size_t len =
1693                         dev_info->dev->data->nb_rx_queues * sizeof(uint16_t);
1694                 dev_info->intr_queue =
1695                         rte_zmalloc_socket(
1696                                 rx_adapter->mem_name,
1697                                 len,
1698                                 0,
1699                                 rx_adapter->socket_id);
1700                 if (dev_info->intr_queue == NULL)
1701                         return -ENOMEM;
1702         }
1703
1704         init_fd = rx_adapter->epd;
1705         err = rxa_init_epd(rx_adapter);
1706         if (err)
1707                 goto err_free_queue;
1708
1709         qd.port = eth_dev_id;
1710         qd.queue = rx_queue_id;
1711
1712         err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
1713                                         rx_adapter->epd,
1714                                         RTE_INTR_EVENT_ADD,
1715                                         qd.ptr);
1716         if (err) {
1717                 RTE_EDEV_LOG_ERR("Failed to add interrupt event for"
1718                         " Rx Queue %u err %d", rx_queue_id, err);
1719                 goto err_del_fd;
1720         }
1721
1722         err = rte_eth_dev_rx_intr_enable(eth_dev_id, rx_queue_id);
1723         if (err) {
1724                 RTE_EDEV_LOG_ERR("Could not enable interrupt for"
1725                                 " Rx Queue %u err %d", rx_queue_id, err);
1726
1727                 goto err_del_event;
1728         }
1729
1730         err = rxa_create_intr_thread(rx_adapter);
1731         if (!err)  {
1732                 if (sintr)
1733                         dev_info->shared_intr_enabled = 1;
1734                 else
1735                         dev_info->rx_queue[rx_queue_id].intr_enabled = 1;
1736                 return 0;
1737         }
1738
1739
1740         err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
1741         if (err)
1742                 RTE_EDEV_LOG_ERR("Could not disable interrupt for"
1743                                 " Rx Queue %u err %d", rx_queue_id, err);
1744 err_del_event:
1745         err1 = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
1746                                         rx_adapter->epd,
1747                                         RTE_INTR_EVENT_DEL,
1748                                         0);
1749         if (err1) {
1750                 RTE_EDEV_LOG_ERR("Could not delete event for"
1751                                 " Rx Queue %u err %d", rx_queue_id, err1);
1752         }
1753 err_del_fd:
1754         if (init_fd == INIT_FD) {
1755                 close(rx_adapter->epd);
1756                 rx_adapter->epd = -1;
1757         }
1758 err_free_queue:
1759         if (intr_queue == NULL)
1760                 rte_free(dev_info->intr_queue);
1761
1762         return err;
1763 }
1764
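/*
 * Configure interrupts for one queue or, when rx_queue_id == -1, for every
 * Rx queue of the device; on partial failure the queues configured so far
 * are disabled again.
 */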
1765 static int
1766 rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
1767                    struct eth_device_info *dev_info, int rx_queue_id)
1768
1769 {
1770         int i, j, err;
1771         int si = -1;
1772         int shared_done = (dev_info->nb_shared_intr > 0);
1773
1774         if (rx_queue_id != -1) {
1775                 if (rxa_shared_intr(dev_info, rx_queue_id) && shared_done)
1776                         return 0;
1777                 return rxa_config_intr(rx_adapter, dev_info, rx_queue_id);
1778         }
1779
1780         err = 0;
1781         for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++) {
1782
1783                 if (rxa_shared_intr(dev_info, i) && shared_done)
1784                         continue;
1785
1786                 err = rxa_config_intr(rx_adapter, dev_info, i);
1787
1788                 shared_done = err == 0 && rxa_shared_intr(dev_info, i);
1789                 if (shared_done) {
1790                         si = i;
1791                         dev_info->shared_intr_enabled = 1;
1792                 }
1793                 if (err)
1794                         break;
1795         }
1796
1797         if (err == 0)
1798                 return 0;
1799
1800         shared_done = (dev_info->nb_shared_intr > 0);
1801         for (j = 0; j < i; j++) {
1802                 if (rxa_intr_queue(dev_info, j))
1803                         continue;
1804                 if (rxa_shared_intr(dev_info, j) && si != j)
1805                         continue;
1806                 err = rxa_disable_intr(rx_adapter, dev_info, j);
1807                 if (err)
1808                         break;
1809
1810         }
1811
1812         return err;
1813 }
1814
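/*
 * Register the adapter's service function and run the configuration
 * callback once to obtain the event port and enqueue burst limit used by
 * the service.
 */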
1815 static int
1816 rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
1817 {
1818         int ret;
1819         struct rte_service_spec service;
1820         struct rte_event_eth_rx_adapter_conf rx_adapter_conf;
1821
1822         if (rx_adapter->service_inited)
1823                 return 0;
1824
1825         memset(&service, 0, sizeof(service));
1826         snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN,
1827                 "rte_event_eth_rx_adapter_%d", id);
1828         service.socket_id = rx_adapter->socket_id;
1829         service.callback = rxa_service_func;
1830         service.callback_userdata = rx_adapter;
1831         /* Service function handles locking for queue add/del updates */
1832         service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
1833         ret = rte_service_component_register(&service, &rx_adapter->service_id);
1834         if (ret) {
1835                 RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
1836                         service.name, ret);
1837                 return ret;
1838         }
1839
1840         ret = rx_adapter->conf_cb(id, rx_adapter->eventdev_id,
1841                 &rx_adapter_conf, rx_adapter->conf_arg);
1842         if (ret) {
1843                 RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
1844                         ret);
1845                 goto err_done;
1846         }
1847         rx_adapter->event_port_id = rx_adapter_conf.event_port_id;
1848         rx_adapter->max_nb_rx = rx_adapter_conf.max_nb_rx;
1849         rx_adapter->service_inited = 1;
1850         rx_adapter->epd = INIT_FD;
1851         return 0;
1852
1853 err_done:
1854         rte_service_component_unregister(rx_adapter->service_id);
1855         return ret;
1856 }
1857
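/* Update per-adapter and per-device queue counts on queue add/delete */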
1858 static void
1859 rxa_update_queue(struct event_eth_rx_adapter *rx_adapter,
1860                  struct eth_device_info *dev_info, int32_t rx_queue_id,
1861                  uint8_t add)
1862 {
1863         struct eth_rx_queue_info *queue_info;
1864         int enabled;
1865         uint16_t i;
1866
1867         if (dev_info->rx_queue == NULL)
1868                 return;
1869
1870         if (rx_queue_id == -1) {
1871                 for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
1872                         rxa_update_queue(rx_adapter, dev_info, i, add);
1873         } else {
1874                 queue_info = &dev_info->rx_queue[rx_queue_id];
1875                 enabled = queue_info->queue_enabled;
1876                 if (add) {
1877                         rx_adapter->nb_queues += !enabled;
1878                         dev_info->nb_dev_queues += !enabled;
1879                 } else {
1880                         rx_adapter->nb_queues -= enabled;
1881                         dev_info->nb_dev_queues -= enabled;
1882                 }
1883                 queue_info->queue_enabled = !!add;
1884         }
1885 }
1886
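/*
 * Initialize per-queue vectorization state; the timeout is converted from
 * nanoseconds to timer ticks and, if the queue event carries no flow id,
 * a default one is derived from the port and queue ids.
 */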
1887 static void
1888 rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
1889                     uint64_t vector_ns, struct rte_mempool *mp, uint32_t qid,
1890                     uint16_t port_id)
1891 {
1892 #define NSEC2TICK(__ns, __freq) (((__ns) * (__freq)) / 1E9)
1893         struct eth_rx_vector_data *vector_data;
1894         uint32_t flow_id;
1895
1896         vector_data = &queue_info->vector_data;
1897         vector_data->max_vector_count = vector_count;
1898         vector_data->port = port_id;
1899         vector_data->queue = qid;
1900         vector_data->vector_pool = mp;
1901         vector_data->vector_timeout_ticks =
1902                 NSEC2TICK(vector_ns, rte_get_timer_hz());
1903         vector_data->ts = 0;
1904         flow_id = queue_info->event & 0xFFFFF;
1905         flow_id =
1906                 flow_id == 0 ? (qid & 0xFFF) | (port_id & 0xFF) << 12 : flow_id;
1907         vector_data->event = (queue_info->event & ~0xFFFFF) | flow_id;
1908 }
1909
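/*
 * Software (service) path for queue delete: flush partially filled event
 * vectors for the queue, update poll/interrupt bookkeeping and free the
 * per-queue event buffer and stats when per-queue buffers are in use.
 */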
1910 static void
1911 rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
1912            struct eth_device_info *dev_info, int32_t rx_queue_id)
1913 {
1914         struct eth_rx_vector_data *vec;
1915         int pollq;
1916         int intrq;
1917         int sintrq;
1918
1919
1920         if (rx_adapter->nb_queues == 0)
1921                 return;
1922
1923         if (rx_queue_id == -1) {
1924                 uint16_t nb_rx_queues;
1925                 uint16_t i;
1926
1927                 nb_rx_queues = dev_info->dev->data->nb_rx_queues;
1928                 for (i = 0; i < nb_rx_queues; i++)
1929                         rxa_sw_del(rx_adapter, dev_info, i);
1930                 return;
1931         }
1932
1933         /* Push all the partial event vectors to event device. */
1934         TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {
1935                 if (vec->queue != rx_queue_id)
1936                         continue;
1937                 rxa_vector_expire(vec, rx_adapter);
1938                 TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
1939         }
1940
1941         pollq = rxa_polled_queue(dev_info, rx_queue_id);
1942         intrq = rxa_intr_queue(dev_info, rx_queue_id);
1943         sintrq = rxa_shared_intr(dev_info, rx_queue_id);
1944         rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 0);
1945         rx_adapter->num_rx_polled -= pollq;
1946         dev_info->nb_rx_poll -= pollq;
1947         rx_adapter->num_rx_intr -= intrq;
1948         dev_info->nb_rx_intr -= intrq;
1949         dev_info->nb_shared_intr -= intrq && sintrq;
1950         if (rx_adapter->use_queue_event_buf) {
1951                 struct eth_event_enqueue_buffer *event_buf =
1952                         dev_info->rx_queue[rx_queue_id].event_buf;
1953                 struct rte_event_eth_rx_adapter_stats *stats =
1954                         dev_info->rx_queue[rx_queue_id].stats;
1955                 rte_free(event_buf->events);
1956                 rte_free(event_buf);
1957                 rte_free(stats);
1958                 dev_info->rx_queue[rx_queue_id].event_buf = NULL;
1959                 dev_info->rx_queue[rx_queue_id].stats = NULL;
1960         }
1961 }
1962
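/*
 * Software (service) path for queue add: record the servicing weight and
 * the event template used for enqueue, set up vectorization if requested
 * and, when per-queue event buffers are enabled, allocate the queue's
 * event buffer and stats storage.
 */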
1963 static int
1964 rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
1965               struct eth_device_info *dev_info, int32_t rx_queue_id,
1966               const struct rte_event_eth_rx_adapter_queue_conf *conf)
1967 {
1968         struct eth_rx_queue_info *queue_info;
1969         const struct rte_event *ev = &conf->ev;
1970         int pollq;
1971         int intrq;
1972         int sintrq;
1973         struct rte_event *qi_ev;
1974         struct eth_event_enqueue_buffer *new_rx_buf = NULL;
1975         struct rte_event_eth_rx_adapter_stats *stats = NULL;
1976         uint16_t eth_dev_id = dev_info->dev->data->port_id;
1977         int ret;
1978
1979         if (rx_queue_id == -1) {
1980                 uint16_t nb_rx_queues;
1981                 uint16_t i;
1982
1983                 nb_rx_queues = dev_info->dev->data->nb_rx_queues;
1984                 for (i = 0; i < nb_rx_queues; i++) {
1985                         ret = rxa_add_queue(rx_adapter, dev_info, i, conf);
1986                         if (ret)
1987                                 return ret;
1988                 }
1989                 return 0;
1990         }
1991
1992         pollq = rxa_polled_queue(dev_info, rx_queue_id);
1993         intrq = rxa_intr_queue(dev_info, rx_queue_id);
1994         sintrq = rxa_shared_intr(dev_info, rx_queue_id);
1995
1996         queue_info = &dev_info->rx_queue[rx_queue_id];
1997         queue_info->wt = conf->servicing_weight;
1998
1999         qi_ev = (struct rte_event *)&queue_info->event;
2000         qi_ev->event = ev->event;
2001         qi_ev->op = RTE_EVENT_OP_NEW;
2002         qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
2003         qi_ev->sub_event_type = 0;
2004
2005         if (conf->rx_queue_flags &
2006                         RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {
2007                 queue_info->flow_id_mask = ~0;
2008         } else
2009                 qi_ev->flow_id = 0;
2010
2011         if (conf->rx_queue_flags &
2012             RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
2013                 queue_info->ena_vector = 1;
2014                 qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
2015                 rxa_set_vector_data(queue_info, conf->vector_sz,
2016                                     conf->vector_timeout_ns, conf->vector_mp,
2017                                     rx_queue_id, dev_info->dev->data->port_id);
2018                 rx_adapter->ena_vector = 1;
2019                 rx_adapter->vector_tmo_ticks =
2020                         rx_adapter->vector_tmo_ticks ?
2021                                       RTE_MIN(queue_info->vector_data
2022                                                         .vector_timeout_ticks >>
2023                                                 1,
2024                                         rx_adapter->vector_tmo_ticks) :
2025                                 queue_info->vector_data.vector_timeout_ticks >>
2026                                         1;
2027         }
2028
2029         rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
2030         if (rxa_polled_queue(dev_info, rx_queue_id)) {
2031                 rx_adapter->num_rx_polled += !pollq;
2032                 dev_info->nb_rx_poll += !pollq;
2033                 rx_adapter->num_rx_intr -= intrq;
2034                 dev_info->nb_rx_intr -= intrq;
2035                 dev_info->nb_shared_intr -= intrq && sintrq;
2036         }
2037
2038         if (rxa_intr_queue(dev_info, rx_queue_id)) {
2039                 rx_adapter->num_rx_polled -= pollq;
2040                 dev_info->nb_rx_poll -= pollq;
2041                 rx_adapter->num_rx_intr += !intrq;
2042                 dev_info->nb_rx_intr += !intrq;
2043                 dev_info->nb_shared_intr += !intrq && sintrq;
2044                 if (dev_info->nb_shared_intr == 1) {
2045                         if (dev_info->multi_intr_cap)
2046                                 dev_info->next_q_idx =
2047                                         RTE_MAX_RXTX_INTR_VEC_ID - 1;
2048                         else
2049                                 dev_info->next_q_idx = 0;
2050                 }
2051         }
2052
2053         if (!rx_adapter->use_queue_event_buf)
2054                 return 0;
2055
2056         new_rx_buf = rte_zmalloc_socket("rx_buffer_meta",
2057                                 sizeof(*new_rx_buf), 0,
2058                                 rte_eth_dev_socket_id(eth_dev_id));
2059         if (new_rx_buf == NULL) {
2060                 RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for "
2061                                  "dev_id: %d queue_id: %d",
2062                                  eth_dev_id, rx_queue_id);
2063                 return -ENOMEM;
2064         }
2065
2066         new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE);
2067         new_rx_buf->events_size += (2 * BATCH_SIZE);
2068         new_rx_buf->events = rte_zmalloc_socket("rx_buffer",
2069                                 sizeof(struct rte_event) *
2070                                 new_rx_buf->events_size, 0,
2071                                 rte_eth_dev_socket_id(eth_dev_id));
2072         if (new_rx_buf->events == NULL) {
2073                 rte_free(new_rx_buf);
2074                 RTE_EDEV_LOG_ERR("Failed to allocate event buffer for "
2075                                  "dev_id: %d queue_id: %d",
2076                                  eth_dev_id, rx_queue_id);
2077                 return -ENOMEM;
2078         }
2079
2080         queue_info->event_buf = new_rx_buf;
2081
2082         /* Allocate storage for adapter queue stats */
2083         stats = rte_zmalloc_socket("rx_queue_stats",
2084                                 sizeof(*stats), 0,
2085                                 rte_eth_dev_socket_id(eth_dev_id));
2086         if (stats == NULL) {
2087                 rte_free(new_rx_buf->events);
2088                 rte_free(new_rx_buf);
2089                 RTE_EDEV_LOG_ERR("Failed to allocate stats storage for"
2090                                  " dev_id: %d queue_id: %d",
2091                                  eth_dev_id, rx_queue_id);
2092                 return -ENOMEM;
2093         }
2094
2095         queue_info->stats = stats;
2096
2097         return 0;
2098 }
2099
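/*
 * Add a queue (or all queues) on the software path: recompute the polled
 * queue and WRR arrays for the new configuration, move queues between poll
 * and interrupt mode as needed, and swap in the new arrays on success.
 */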
2100 static int
2101 rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
2102            int rx_queue_id,
2103            const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2104 {
2105         struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
2106         struct rte_event_eth_rx_adapter_queue_conf temp_conf;
2107         int ret;
2108         struct eth_rx_poll_entry *rx_poll;
2109         struct eth_rx_queue_info *rx_queue;
2110         uint32_t *rx_wrr;
2111         uint16_t nb_rx_queues;
2112         uint32_t nb_rx_poll, nb_wrr;
2113         uint32_t nb_rx_intr;
2114         int num_intr_vec;
2115         uint16_t wt;
2116
2117         if (queue_conf->servicing_weight == 0) {
2118                 struct rte_eth_dev_data *data = dev_info->dev->data;
2119
2120                 temp_conf = *queue_conf;
2121                 if (!data->dev_conf.intr_conf.rxq) {
2122                         /* If Rx interrupts are disabled, set wt = 1 */
2123                         temp_conf.servicing_weight = 1;
2124                 }
2125                 queue_conf = &temp_conf;
2126
2127                 if (queue_conf->servicing_weight == 0 &&
2128                     rx_adapter->use_queue_event_buf) {
2129
2130                         RTE_EDEV_LOG_ERR("Use of queue level event buffer "
2131                                          "not supported for interrupt queues "
2132                                          "dev_id: %d queue_id: %d",
2133                                          eth_dev_id, rx_queue_id);
2134                         return -EINVAL;
2135                 }
2136         }
2137
2138         nb_rx_queues = dev_info->dev->data->nb_rx_queues;
2139         rx_queue = dev_info->rx_queue;
2140         wt = queue_conf->servicing_weight;
2141
2142         if (dev_info->rx_queue == NULL) {
2143                 dev_info->rx_queue =
2144                     rte_zmalloc_socket(rx_adapter->mem_name,
2145                                        nb_rx_queues *
2146                                        sizeof(struct eth_rx_queue_info), 0,
2147                                        rx_adapter->socket_id);
2148                 if (dev_info->rx_queue == NULL)
2149                         return -ENOMEM;
2150         }
2151         rx_wrr = NULL;
2152         rx_poll = NULL;
2153
2154         rxa_calc_nb_post_add(rx_adapter, dev_info, rx_queue_id,
2155                         queue_conf->servicing_weight,
2156                         &nb_rx_poll, &nb_rx_intr, &nb_wrr);
2157
2158         if (dev_info->dev->intr_handle)
2159                 dev_info->multi_intr_cap =
2160                         rte_intr_cap_multiple(dev_info->dev->intr_handle);
2161
2162         ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
2163                                 &rx_poll, &rx_wrr);
2164         if (ret)
2165                 goto err_free_rxqueue;
2166
2167         if (wt == 0) {
2168                 num_intr_vec = rxa_nb_intr_vect(dev_info, rx_queue_id, 1);
2169
2170                 ret = rxa_intr_ring_check_avail(rx_adapter, num_intr_vec);
2171                 if (ret)
2172                         goto err_free_rxqueue;
2173
2174                 ret = rxa_add_intr_queue(rx_adapter, dev_info, rx_queue_id);
2175                 if (ret)
2176                         goto err_free_rxqueue;
2177         } else {
2178
2179                 num_intr_vec = 0;
2180                 if (rx_adapter->num_rx_intr > nb_rx_intr) {
2181                         num_intr_vec = rxa_nb_intr_vect(dev_info,
2182                                                 rx_queue_id, 0);
2183                         /* interrupt based queues are being converted to
2184                          * poll mode queues, delete the interrupt configuration
2185                          * for those.
2186                          */
2187                         ret = rxa_del_intr_queue(rx_adapter,
2188                                                 dev_info, rx_queue_id);
2189                         if (ret)
2190                                 goto err_free_rxqueue;
2191                 }
2192         }
2193
2194         if (nb_rx_intr == 0) {
2195                 ret = rxa_free_intr_resources(rx_adapter);
2196                 if (ret)
2197                         goto err_free_rxqueue;
2198         }
2199
2200         if (wt == 0) {
2201                 uint16_t i;
2202
2203                 if (rx_queue_id  == -1) {
2204                         for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
2205                                 dev_info->intr_queue[i] = i;
2206                 } else {
2207                         if (!rxa_intr_queue(dev_info, rx_queue_id))
2208                                 dev_info->intr_queue[nb_rx_intr - 1] =
2209                                         rx_queue_id;
2210                 }
2211         }
2212
2213
2214
2215         ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
2216         if (ret)
2217                 goto err_free_rxqueue;
2218         rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
2219
2220         rte_free(rx_adapter->eth_rx_poll);
2221         rte_free(rx_adapter->wrr_sched);
2222
2223         rx_adapter->eth_rx_poll = rx_poll;
2224         rx_adapter->wrr_sched = rx_wrr;
2225         rx_adapter->wrr_len = nb_wrr;
2226         rx_adapter->num_intr_vec += num_intr_vec;
2227         return 0;
2228
2229 err_free_rxqueue:
2230         if (rx_queue == NULL) {
2231                 rte_free(dev_info->rx_queue);
2232                 dev_info->rx_queue = NULL;
2233         }
2234
2235         rte_free(rx_poll);
2236         rte_free(rx_wrr);
2237
2238         return ret;
2239 }
2240
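/*
 * Common start/stop handler: invokes the PMD start/stop callback for
 * devices with an internal event port and toggles the service run state
 * for devices handled by the service function.
 */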
2241 static int
2242 rxa_ctrl(uint8_t id, int start)
2243 {
2244         struct event_eth_rx_adapter *rx_adapter;
2245         struct rte_eventdev *dev;
2246         struct eth_device_info *dev_info;
2247         uint32_t i;
2248         int use_service = 0;
2249         int stop = !start;
2250
2251         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2252         rx_adapter = rxa_id_to_adapter(id);
2253         if (rx_adapter == NULL)
2254                 return -EINVAL;
2255
2256         dev = &rte_eventdevs[rx_adapter->eventdev_id];
2257
2258         RTE_ETH_FOREACH_DEV(i) {
2259                 dev_info = &rx_adapter->eth_devices[i];
2260                 /* if start, check for num dev queues */
2261                 if (start && !dev_info->nb_dev_queues)
2262                         continue;
2263                 /* if stop, check if dev has been started */
2264                 if (stop && !dev_info->dev_rx_started)
2265                         continue;
2266                 use_service |= !dev_info->internal_event_port;
2267                 dev_info->dev_rx_started = start;
2268                 if (dev_info->internal_event_port == 0)
2269                         continue;
2270                 start ? (*dev->dev_ops->eth_rx_adapter_start)(dev,
2271                                                 &rte_eth_devices[i]) :
2272                         (*dev->dev_ops->eth_rx_adapter_stop)(dev,
2273                                                 &rte_eth_devices[i]);
2274         }
2275
2276         if (use_service) {
2277                 rte_spinlock_lock(&rx_adapter->rx_lock);
2278                 rx_adapter->rxa_started = start;
2279                 rte_service_runstate_set(rx_adapter->service_id, start);
2280                 rte_spinlock_unlock(&rx_adapter->rx_lock);
2281         }
2282
2283         return 0;
2284 }
2285
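/*
 * Allocate and initialize an adapter instance: per-port device state, the
 * big-endian copy of the default RSS key used for flow id generation and,
 * unless per-queue event buffers are requested, the global enqueue buffer.
 */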
2286 static int
2287 rxa_create(uint8_t id, uint8_t dev_id,
2288            struct rte_event_eth_rx_adapter_params *rxa_params,
2289            rte_event_eth_rx_adapter_conf_cb conf_cb,
2290            void *conf_arg)
2291 {
2292         struct event_eth_rx_adapter *rx_adapter;
2293         struct eth_event_enqueue_buffer *buf;
2294         struct rte_event *events;
2295         int ret;
2296         int socket_id;
2297         uint16_t i;
2298         char mem_name[ETH_RX_ADAPTER_SERVICE_NAME_LEN];
2299         const uint8_t default_rss_key[] = {
2300                 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
2301                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
2302                 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
2303                 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
2304                 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
2305         };
2306
2307         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2308         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
2309
2310         if (conf_cb == NULL)
2311                 return -EINVAL;
2312
2313         if (event_eth_rx_adapter == NULL) {
2314                 ret = rte_event_eth_rx_adapter_init();
2315                 if (ret)
2316                         return ret;
2317         }
2318
2319         rx_adapter = rxa_id_to_adapter(id);
2320         if (rx_adapter != NULL) {
2321                 RTE_EDEV_LOG_ERR("Eth Rx adapter exists id = %" PRIu8, id);
2322                 return -EEXIST;
2323         }
2324
2325         socket_id = rte_event_dev_socket_id(dev_id);
2326         snprintf(mem_name, ETH_RX_ADAPTER_MEM_NAME_LEN,
2327                 "rte_event_eth_rx_adapter_%d",
2328                 id);
2329
2330         rx_adapter = rte_zmalloc_socket(mem_name, sizeof(*rx_adapter),
2331                         RTE_CACHE_LINE_SIZE, socket_id);
2332         if (rx_adapter == NULL) {
2333                 RTE_EDEV_LOG_ERR("failed to get mem for rx adapter");
2334                 return -ENOMEM;
2335         }
2336
2337         rx_adapter->eventdev_id = dev_id;
2338         rx_adapter->socket_id = socket_id;
2339         rx_adapter->conf_cb = conf_cb;
2340         rx_adapter->conf_arg = conf_arg;
2341         rx_adapter->id = id;
2342         TAILQ_INIT(&rx_adapter->vector_list);
2343         strcpy(rx_adapter->mem_name, mem_name);
2344         rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
2345                                         RTE_MAX_ETHPORTS *
2346                                         sizeof(struct eth_device_info), 0,
2347                                         socket_id);
2348         rte_convert_rss_key((const uint32_t *)default_rss_key,
2349                         (uint32_t *)rx_adapter->rss_key_be,
2350                             RTE_DIM(default_rss_key));
2351
2352         if (rx_adapter->eth_devices == NULL) {
2353                 RTE_EDEV_LOG_ERR("failed to get mem for eth devices");
2354                 rte_free(rx_adapter);
2355                 return -ENOMEM;
2356         }
2357
2358         rte_spinlock_init(&rx_adapter->rx_lock);
2359
2360         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
2361                 rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
2362
2363         /* Rx adapter event buffer allocation */
2364         rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf;
2365
2366         if (!rx_adapter->use_queue_event_buf) {
2367                 buf = &rx_adapter->event_enqueue_buffer;
2368                 buf->events_size = rxa_params->event_buf_size;
2369
2370                 events = rte_zmalloc_socket(rx_adapter->mem_name,
2371                                             buf->events_size * sizeof(*events),
2372                                             0, socket_id);
2373                 if (events == NULL) {
2374                         RTE_EDEV_LOG_ERR("Failed to allocate memory "
2375                                          "for adapter event buffer");
2376                         rte_free(rx_adapter->eth_devices);
2377                         rte_free(rx_adapter);
2378                         return -ENOMEM;
2379                 }
2380
2381                 rx_adapter->event_enqueue_buffer.events = events;
2382         }
2383
2384         event_eth_rx_adapter[id] = rx_adapter;
2385
2386         if (conf_cb == rxa_default_conf_cb)
2387                 rx_adapter->default_cb_arg = 1;
2388
2389         if (rte_mbuf_dyn_rx_timestamp_register(
2390                         &event_eth_rx_timestamp_dynfield_offset,
2391                         &event_eth_rx_timestamp_dynflag) != 0) {
2392                 RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf");
2393                 return -rte_errno;
2394         }
2395
2396         rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb,
2397                 conf_arg);
2398         return 0;
2399 }
2400
2401 int
2402 rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
2403                                 rte_event_eth_rx_adapter_conf_cb conf_cb,
2404                                 void *conf_arg)
2405 {
2406         struct rte_event_eth_rx_adapter_params rxa_params = {0};
2407
2408         /* use default values for adapter params */
2409         rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE;
2410         rxa_params.use_queue_event_buf = false;
2411
2412         return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg);
2413 }
2414
2415 int
2416 rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id,
2417                         struct rte_event_port_conf *port_config,
2418                         struct rte_event_eth_rx_adapter_params *rxa_params)
2419 {
2420         struct rte_event_port_conf *pc;
2421         int ret;
2422         struct rte_event_eth_rx_adapter_params temp_params = {0};
2423
2424         if (port_config == NULL)
2425                 return -EINVAL;
2426
2427         if (rxa_params == NULL) {
2428                 /* use default values if rxa_params is NULL */
2429                 rxa_params = &temp_params;
2430                 rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE;
2431                 rxa_params->use_queue_event_buf = false;
2432         } else if ((!rxa_params->use_queue_event_buf &&
2433                     rxa_params->event_buf_size == 0) ||
2434                    (rxa_params->use_queue_event_buf &&
2435                     rxa_params->event_buf_size != 0)) {
2436                 RTE_EDEV_LOG_ERR("Invalid adapter params");
2437                 return -EINVAL;
2438         } else if (!rxa_params->use_queue_event_buf) {
2439                 /* adjust event buff size with BATCH_SIZE used for fetching
2440                 /* adjust event buffer size with BATCH_SIZE used for fetching
2441                  * packets from NIC Rx queues to get full buffer utilization
2442                  * and prevent unnecessary rollovers.
2443
2444                 rxa_params->event_buf_size =
2445                         RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE);
2446                 rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE);
2447         }
2448
2449         pc = rte_malloc(NULL, sizeof(*pc), 0);
2450         if (pc == NULL)
2451                 return -ENOMEM;
2452
2453         *pc = *port_config;
2454
2455         ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc);
2456         if (ret)
2457                 rte_free(pc);
2458
2459         return ret;
2460 }
2461
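/*
 * Example usage (illustrative only; the adapter id, event device id and
 * port configuration values are application specific):
 *
 *	struct rte_event_port_conf pc = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 16,
 *		.enqueue_depth = 16,
 *	};
 *	int ret = rte_event_eth_rx_adapter_create(0, 0, &pc);
 */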
2462 int
2463 rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
2464                 struct rte_event_port_conf *port_config)
2465 {
2466         struct rte_event_port_conf *pc;
2467         int ret;
2468
2469         if (port_config == NULL)
2470                 return -EINVAL;
2471
2472         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2473
2474         pc = rte_malloc(NULL, sizeof(*pc), 0);
2475         if (pc == NULL)
2476                 return -ENOMEM;
2477         *pc = *port_config;
2478
2479         ret = rte_event_eth_rx_adapter_create_ext(id, dev_id,
2480                                         rxa_default_conf_cb,
2481                                         pc);
2482         if (ret)
2483                 rte_free(pc);
2484         return ret;
2485 }
2486
2487 int
2488 rte_event_eth_rx_adapter_free(uint8_t id)
2489 {
2490         struct event_eth_rx_adapter *rx_adapter;
2491
2492         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2493
2494         rx_adapter = rxa_id_to_adapter(id);
2495         if (rx_adapter == NULL)
2496                 return -EINVAL;
2497
2498         if (rx_adapter->nb_queues) {
2499                 RTE_EDEV_LOG_ERR("%" PRIu16 " Rx queues not deleted",
2500                                 rx_adapter->nb_queues);
2501                 return -EBUSY;
2502         }
2503
2504         if (rx_adapter->default_cb_arg)
2505                 rte_free(rx_adapter->conf_arg);
2506         rte_free(rx_adapter->eth_devices);
2507         if (!rx_adapter->use_queue_event_buf)
2508                 rte_free(rx_adapter->event_enqueue_buffer.events);
2509         rte_free(rx_adapter);
2510         event_eth_rx_adapter[id] = NULL;
2511
2512         rte_eventdev_trace_eth_rx_adapter_free(id);
2513         return 0;
2514 }
2515
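/*
 * Validate adapter and device capabilities against the queue configuration
 * (flow id override, event vectorization limits, per-queue event buffer
 * size) and hand the queue either to the PMD, when the device has an
 * internal event port, or to the software service path.
 *
 * Example (illustrative only; id, eth_dev_id and ev_qid are application
 * defined):
 *
 *	struct rte_event_eth_rx_adapter_queue_conf qc = { 0 };
 *	qc.ev.queue_id = ev_qid;
 *	qc.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	qc.servicing_weight = 1;
 *	int ret = rte_event_eth_rx_adapter_queue_add(id, eth_dev_id, -1, &qc);
 */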
2516 int
2517 rte_event_eth_rx_adapter_queue_add(uint8_t id,
2518                 uint16_t eth_dev_id,
2519                 int32_t rx_queue_id,
2520                 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2521 {
2522         int ret;
2523         uint32_t cap;
2524         struct event_eth_rx_adapter *rx_adapter;
2525         struct rte_eventdev *dev;
2526         struct eth_device_info *dev_info;
2527         struct rte_event_eth_rx_adapter_vector_limits limits;
2528
2529         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2530         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
2531
2532         rx_adapter = rxa_id_to_adapter(id);
2533         if ((rx_adapter == NULL) || (queue_conf == NULL))
2534                 return -EINVAL;
2535
2536         dev = &rte_eventdevs[rx_adapter->eventdev_id];
2537         ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
2538                                                 eth_dev_id,
2539                                                 &cap);
2540         if (ret) {
2541                 RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
2542                         " eth port %" PRIu16, id, eth_dev_id);
2543                 return ret;
2544         }
2545
2546         if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) == 0
2547                 && (queue_conf->rx_queue_flags &
2548                         RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
2549                 RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
2550                                 " eth port: %" PRIu16 " adapter id: %" PRIu8,
2551                                 eth_dev_id, id);
2552                 return -EINVAL;
2553         }
2554
2555         if (queue_conf->rx_queue_flags &
2556             RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
2557
2558                 if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0) {
2559                         RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
2560                                          " eth port: %" PRIu16
2561                                          " adapter id: %" PRIu8,
2562                                          eth_dev_id, id);
2563                         return -EINVAL;
2564                 }
2565
2566                 ret = rte_event_eth_rx_adapter_vector_limits_get(
2567                         rx_adapter->eventdev_id, eth_dev_id, &limits);
2568                 if (ret < 0) {
2569                         RTE_EDEV_LOG_ERR("Failed to get event device vector limits,"
2570                                          " eth port: %" PRIu16
2571                                          " adapter id: %" PRIu8,
2572                                          eth_dev_id, id);
2573                         return -EINVAL;
2574                 }
2575                 if (queue_conf->vector_sz < limits.min_sz ||
2576                     queue_conf->vector_sz > limits.max_sz ||
2577                     queue_conf->vector_timeout_ns < limits.min_timeout_ns ||
2578                     queue_conf->vector_timeout_ns > limits.max_timeout_ns ||
2579                     queue_conf->vector_mp == NULL) {
2580                         RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
2581                                          " eth port: %" PRIu16
2582                                          " adapter id: %" PRIu8,
2583                                          eth_dev_id, id);
2584                         return -EINVAL;
2585                 }
2586                 if (queue_conf->vector_mp->elt_size <
2587                     (sizeof(struct rte_event_vector) +
2588                      (sizeof(uintptr_t) * queue_conf->vector_sz))) {
2589                         RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
2590                                          " eth port: %" PRIu16
2591                                          " adapter id: %" PRIu8,
2592                                          eth_dev_id, id);
2593                         return -EINVAL;
2594                 }
2595         }
2596
2597         if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
2598                 (rx_queue_id != -1)) {
2599                 RTE_EDEV_LOG_ERR("Rx queues can only be connected to single "
2600                         "event queue, eth port: %" PRIu16 " adapter id: %"
2601                         PRIu8, eth_dev_id, id);
2602                 return -EINVAL;
2603         }
2604
2605         if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
2606                         rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
2607                 RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
2608                          (uint16_t)rx_queue_id);
2609                 return -EINVAL;
2610         }
2611
2612         if ((rx_adapter->use_queue_event_buf &&
2613              queue_conf->event_buf_size == 0) ||
2614             (!rx_adapter->use_queue_event_buf &&
2615              queue_conf->event_buf_size != 0)) {
2616                 RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue");
2617                 return -EINVAL;
2618         }
2619
2620         dev_info = &rx_adapter->eth_devices[eth_dev_id];
2621
2622         if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
2623                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_add,
2624                                         -ENOTSUP);
2625                 if (dev_info->rx_queue == NULL) {
2626                         dev_info->rx_queue =
2627                             rte_zmalloc_socket(rx_adapter->mem_name,
2628                                         dev_info->dev->data->nb_rx_queues *
2629                                         sizeof(struct eth_rx_queue_info), 0,
2630                                         rx_adapter->socket_id);
2631                         if (dev_info->rx_queue == NULL)
2632                                 return -ENOMEM;
2633                 }
2634
2635                 ret = (*dev->dev_ops->eth_rx_adapter_queue_add)(dev,
2636                                 &rte_eth_devices[eth_dev_id],
2637                                 rx_queue_id, queue_conf);
2638                 if (ret == 0) {
2639                         dev_info->internal_event_port = 1;
2640                         rxa_update_queue(rx_adapter,
2641                                         &rx_adapter->eth_devices[eth_dev_id],
2642                                         rx_queue_id,
2643                                         1);
2644                 }
2645         } else {
2646                 rte_spinlock_lock(&rx_adapter->rx_lock);
2647                 dev_info->internal_event_port = 0;
2648                 ret = rxa_init_service(rx_adapter, id);
2649                 if (ret == 0) {
2650                         uint32_t service_id = rx_adapter->service_id;
2651                         ret = rxa_sw_add(rx_adapter, eth_dev_id, rx_queue_id,
2652                                         queue_conf);
2653                         rte_service_component_runstate_set(service_id,
2654                                 rxa_sw_adapter_queue_count(rx_adapter));
2655                 }
2656                 rte_spinlock_unlock(&rx_adapter->rx_lock);
2657         }
2658
2659         rte_eventdev_trace_eth_rx_adapter_queue_add(id, eth_dev_id,
2660                 rx_queue_id, queue_conf, ret);
2661         if (ret)
2662                 return ret;
2663
2664         return 0;
2665 }
2666
2667 static int
2668 rxa_sw_vector_limits(struct rte_event_eth_rx_adapter_vector_limits *limits)
2669 {
2670         limits->max_sz = MAX_VECTOR_SIZE;
2671         limits->min_sz = MIN_VECTOR_SIZE;
2672         limits->max_timeout_ns = MAX_VECTOR_NS;
2673         limits->min_timeout_ns = MIN_VECTOR_NS;
2674
2675         return 0;
2676 }
2677
2678 int
2679 rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
2680                                 int32_t rx_queue_id)
2681 {
2682         int ret = 0;
2683         struct rte_eventdev *dev;
2684         struct event_eth_rx_adapter *rx_adapter;
2685         struct eth_device_info *dev_info;
2686         uint32_t cap;
2687         uint32_t nb_rx_poll = 0;
2688         uint32_t nb_wrr = 0;
2689         uint32_t nb_rx_intr;
2690         struct eth_rx_poll_entry *rx_poll = NULL;
2691         uint32_t *rx_wrr = NULL;
2692         int num_intr_vec;
2693
2694         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2695         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
2696
2697         rx_adapter = rxa_id_to_adapter(id);
2698         if (rx_adapter == NULL)
2699                 return -EINVAL;
2700
2701         dev = &rte_eventdevs[rx_adapter->eventdev_id];
2702         ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
2703                                                 eth_dev_id,
2704                                                 &cap);
2705         if (ret)
2706                 return ret;
2707
2708         if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
2709                 rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
2710                 RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
2711                          (uint16_t)rx_queue_id);
2712                 return -EINVAL;
2713         }
2714
2715         dev_info = &rx_adapter->eth_devices[eth_dev_id];
2716
2717         if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
2718                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_del,
2719                                  -ENOTSUP);
2720                 ret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev,
2721                                                 &rte_eth_devices[eth_dev_id],
2722                                                 rx_queue_id);
2723                 if (ret == 0) {
2724                         rxa_update_queue(rx_adapter,
2725                                         &rx_adapter->eth_devices[eth_dev_id],
2726                                         rx_queue_id,
2727                                         0);
2728                         if (dev_info->nb_dev_queues == 0) {
2729                                 rte_free(dev_info->rx_queue);
2730                                 dev_info->rx_queue = NULL;
2731                         }
2732                 }
2733         } else {
2734                 rxa_calc_nb_post_del(rx_adapter, dev_info, rx_queue_id,
2735                         &nb_rx_poll, &nb_rx_intr, &nb_wrr);
2736
2737                 ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
2738                         &rx_poll, &rx_wrr);
2739                 if (ret)
2740                         return ret;
2741
2742                 rte_spinlock_lock(&rx_adapter->rx_lock);
2743
2744                 num_intr_vec = 0;
2745                 if (rx_adapter->num_rx_intr > nb_rx_intr) {
2746
2747                         num_intr_vec = rxa_nb_intr_vect(dev_info,
2748                                                 rx_queue_id, 0);
2749                         ret = rxa_del_intr_queue(rx_adapter, dev_info,
2750                                         rx_queue_id);
2751                         if (ret)
2752                                 goto unlock_ret;
2753                 }
2754
2755                 if (nb_rx_intr == 0) {
2756                         ret = rxa_free_intr_resources(rx_adapter);
2757                         if (ret)
2758                                 goto unlock_ret;
2759                 }
2760
2761                 rxa_sw_del(rx_adapter, dev_info, rx_queue_id);
2762                 rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
2763
2764                 rte_free(rx_adapter->eth_rx_poll);
2765                 rte_free(rx_adapter->wrr_sched);
2766
2767                 if (nb_rx_intr == 0) {
2768                         rte_free(dev_info->intr_queue);
2769                         dev_info->intr_queue = NULL;
2770                 }
2771
2772                 rx_adapter->eth_rx_poll = rx_poll;
2773                 rx_adapter->wrr_sched = rx_wrr;
2774                 rx_adapter->wrr_len = nb_wrr;
2775                 /*
2776                  * reset next poll start position (wrr_pos) to avoid buffer
2777                  * overrun when wrr_len is reduced in case of queue delete
2778                  */
2779                 rx_adapter->wrr_pos = 0;
2780                 rx_adapter->num_intr_vec += num_intr_vec;
2781
2782                 if (dev_info->nb_dev_queues == 0) {
2783                         rte_free(dev_info->rx_queue);
2784                         dev_info->rx_queue = NULL;
2785                 }
2786 unlock_ret:
2787                 rte_spinlock_unlock(&rx_adapter->rx_lock);
2788                 if (ret) {
2789                         rte_free(rx_poll);
2790                         rte_free(rx_wrr);
2791                         return ret;
2792                 }
2793
2794                 rte_service_component_runstate_set(rx_adapter->service_id,
2795                                 rxa_sw_adapter_queue_count(rx_adapter));
2796         }
2797
2798         rte_eventdev_trace_eth_rx_adapter_queue_del(id, eth_dev_id,
2799                 rx_queue_id, ret);
2800         return ret;
2801 }
2802
2803 int
2804 rte_event_eth_rx_adapter_vector_limits_get(
2805         uint8_t dev_id, uint16_t eth_port_id,
2806         struct rte_event_eth_rx_adapter_vector_limits *limits)
2807 {
2808         struct rte_eventdev *dev;
2809         uint32_t cap;
2810         int ret;
2811
2812         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
2813         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
2814
2815         if (limits == NULL)
2816                 return -EINVAL;
2817
2818         dev = &rte_eventdevs[dev_id];
2819
2820         ret = rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &cap);
2821         if (ret) {
2822                 RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
2823                                  " eth port %" PRIu16,
2824                                  dev_id, eth_port_id);
2825                 return ret;
2826         }
2827
2828         if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
2829                 RTE_FUNC_PTR_OR_ERR_RET(
2830                         *dev->dev_ops->eth_rx_adapter_vector_limits_get,
2831                         -ENOTSUP);
2832                 ret = dev->dev_ops->eth_rx_adapter_vector_limits_get(
2833                         dev, &rte_eth_devices[eth_port_id], limits);
2834         } else {
2835                 ret = rxa_sw_vector_limits(limits);
2836         }
2837
2838         return ret;
2839 }
2840
2841 int
2842 rte_event_eth_rx_adapter_start(uint8_t id)
2843 {
2844         rte_eventdev_trace_eth_rx_adapter_start(id);
2845         return rxa_ctrl(id, 1);
2846 }
2847
2848 int
2849 rte_event_eth_rx_adapter_stop(uint8_t id)
2850 {
2851         rte_eventdev_trace_eth_rx_adapter_stop(id);
2852         return rxa_ctrl(id, 0);
2853 }
2854
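/* Zero the SW counters of a single Rx queue (per-queue event buffer mode) */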
2855 static inline void
2856 rxa_queue_stats_reset(struct eth_rx_queue_info *queue_info)
2857 {
2858         struct rte_event_eth_rx_adapter_stats *q_stats;
2859
2860         q_stats = queue_info->stats;
2861         memset(q_stats, 0, sizeof(*q_stats));
2862 }
2863
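/*
 * Illustrative usage sketch (not part of this file): an application would
 * typically read the aggregated counters as below.
 *
 *	struct rte_event_eth_rx_adapter_stats stats;
 *
 *	if (rte_event_eth_rx_adapter_stats_get(id, &stats) == 0)
 *		printf("rx_packets=%" PRIu64 "\n", stats.rx_packets);
 */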
2864 int
2865 rte_event_eth_rx_adapter_stats_get(uint8_t id,
2866                                struct rte_event_eth_rx_adapter_stats *stats)
2867 {
2868         struct event_eth_rx_adapter *rx_adapter;
2869         struct eth_event_enqueue_buffer *buf;
2870         struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
2871         struct rte_event_eth_rx_adapter_stats dev_stats;
2872         struct rte_eventdev *dev;
2873         struct eth_device_info *dev_info;
2874         struct eth_rx_queue_info *queue_info;
2875         struct rte_event_eth_rx_adapter_stats *q_stats;
2876         uint32_t i, j;
2877         int ret;
2878
2879         if (rxa_memzone_lookup())
2880                 return -ENOMEM;
2881
2882         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2883
2884         rx_adapter = rxa_id_to_adapter(id);
2885         if (rx_adapter == NULL || stats == NULL)
2886                 return -EINVAL;
2887
2888         dev = &rte_eventdevs[rx_adapter->eventdev_id];
2889         memset(stats, 0, sizeof(*stats));
2890
2891         if (rx_adapter->service_inited)
2892                 *stats = rx_adapter->stats;
2893
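        /*
         * Fold per-queue SW stats (when per-queue event buffers are used) and
         * driver-maintained stats (internal event port devices) into the totals.
         */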
2894         RTE_ETH_FOREACH_DEV(i) {
2895                 dev_info = &rx_adapter->eth_devices[i];
2896
2897                 if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) {
2898
2899                         for (j = 0; j < dev_info->dev->data->nb_rx_queues;
2900                              j++) {
2901                                 queue_info = &dev_info->rx_queue[j];
2902                                 if (!queue_info->queue_enabled)
2903                                         continue;
2904                                 q_stats = queue_info->stats;
2905
2906                                 stats->rx_packets += q_stats->rx_packets;
2907                                 stats->rx_poll_count += q_stats->rx_poll_count;
2908                                 stats->rx_enq_count += q_stats->rx_enq_count;
2909                                 stats->rx_enq_retry += q_stats->rx_enq_retry;
2910                                 stats->rx_dropped += q_stats->rx_dropped;
2911                                 stats->rx_enq_block_cycles +=
2912                                                 q_stats->rx_enq_block_cycles;
2913                         }
2914                 }
2915
2916                 if (dev_info->internal_event_port == 0 ||
2917                         dev->dev_ops->eth_rx_adapter_stats_get == NULL)
2918                         continue;
2919                 ret = (*dev->dev_ops->eth_rx_adapter_stats_get)(dev,
2920                                                 &rte_eth_devices[i],
2921                                                 &dev_stats);
2922                 if (ret)
2923                         continue;
2924                 dev_stats_sum.rx_packets += dev_stats.rx_packets;
2925                 dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
2926         }
2927
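        /* Report the current fill level and size of the adapter-level event buffer */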
2928         buf = &rx_adapter->event_enqueue_buffer;
2929         stats->rx_packets += dev_stats_sum.rx_packets;
2930         stats->rx_enq_count += dev_stats_sum.rx_enq_count;
2931         stats->rx_event_buf_count = buf->count;
2932         stats->rx_event_buf_size = buf->events_size;
2933
2934         return 0;
2935 }
2936
2937 int
2938 rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
2939                 uint16_t eth_dev_id,
2940                 uint16_t rx_queue_id,
2941                 struct rte_event_eth_rx_adapter_queue_stats *stats)
2942 {
2943         struct event_eth_rx_adapter *rx_adapter;
2944         struct eth_device_info *dev_info;
2945         struct eth_rx_queue_info *queue_info;
2946         struct eth_event_enqueue_buffer *event_buf;
2947         struct rte_event_eth_rx_adapter_stats *q_stats;
2948         struct rte_eventdev *dev;
2949
2950         if (rxa_memzone_lookup())
2951                 return -ENOMEM;
2952
2953         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2954         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
2955
2956         rx_adapter = rxa_id_to_adapter(id);
2957
2958         if (rx_adapter == NULL || stats == NULL)
2959                 return -EINVAL;
2960
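        /* Per-queue stats are maintained only when per-queue event buffers are enabled */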
2961         if (!rx_adapter->use_queue_event_buf)
2962                 return -EINVAL;
2963
2964         if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
2965                 RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
2966                 return -EINVAL;
2967         }
2968
2969         dev_info = &rx_adapter->eth_devices[eth_dev_id];
2970         if (dev_info->rx_queue == NULL ||
2971             !dev_info->rx_queue[rx_queue_id].queue_enabled) {
2972                 RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
2973                 return -EINVAL;
2974         }
2975
2976         queue_info = &dev_info->rx_queue[rx_queue_id];
2977         event_buf = queue_info->event_buf;
2978         q_stats = queue_info->stats;
2979
2980         stats->rx_event_buf_count = event_buf->count;
2981         stats->rx_event_buf_size = event_buf->events_size;
2982         stats->rx_packets = q_stats->rx_packets;
2983         stats->rx_poll_count = q_stats->rx_poll_count;
2984         stats->rx_dropped = q_stats->rx_dropped;
2985
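        /* Let the PMD add driver-level per-queue counters if it implements the op */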
2986         dev = &rte_eventdevs[rx_adapter->eventdev_id];
2987         if (dev->dev_ops->eth_rx_adapter_queue_stats_get != NULL) {
2988                 return (*dev->dev_ops->eth_rx_adapter_queue_stats_get)(dev,
2989                                                 &rte_eth_devices[eth_dev_id],
2990                                                 rx_queue_id, stats);
2991         }
2992
2993         return 0;
2994 }
2995
2996 int
2997 rte_event_eth_rx_adapter_stats_reset(uint8_t id)
2998 {
2999         struct event_eth_rx_adapter *rx_adapter;
3000         struct rte_eventdev *dev;
3001         struct eth_device_info *dev_info;
3002         struct eth_rx_queue_info *queue_info;
3003         uint32_t i, j;
3004
3005         if (rxa_memzone_lookup())
3006                 return -ENOMEM;
3007
3008         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3009
3010         rx_adapter = rxa_id_to_adapter(id);
3011         if (rx_adapter == NULL)
3012                 return -EINVAL;
3013
3014         dev = &rte_eventdevs[rx_adapter->eventdev_id];
3015
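        /*
         * Clear per-queue SW stats (per-queue event buffer mode) and any
         * driver-maintained stats for internal event port devices.
         */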
3016         RTE_ETH_FOREACH_DEV(i) {
3017                 dev_info = &rx_adapter->eth_devices[i];
3018
3019                 if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) {
3020
3021                         for (j = 0; j < dev_info->dev->data->nb_rx_queues;
3022                                                 j++) {
3023                                 queue_info = &dev_info->rx_queue[j];
3024                                 if (!queue_info->queue_enabled)
3025                                         continue;
3026                                 rxa_queue_stats_reset(queue_info);
3027                         }
3028                 }
3029
3030                 if (dev_info->internal_event_port == 0 ||
3031                         dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
3032                         continue;
3033                 (*dev->dev_ops->eth_rx_adapter_stats_reset)(dev,
3034                                                         &rte_eth_devices[i]);
3035         }
3036
3037         memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
3038
3039         return 0;
3040 }
3041
3042 int
3043 rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
3044                 uint16_t eth_dev_id,
3045                 uint16_t rx_queue_id)
3046 {
3047         struct event_eth_rx_adapter *rx_adapter;
3048         struct eth_device_info *dev_info;
3049         struct eth_rx_queue_info *queue_info;
3050         struct rte_eventdev *dev;
3051
3052         if (rxa_memzone_lookup())
3053                 return -ENOMEM;
3054
3055         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3056         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
3057
3058         rx_adapter = rxa_id_to_adapter(id);
3059         if (rx_adapter == NULL)
3060                 return -EINVAL;
3061
3062         if (!rx_adapter->use_queue_event_buf)
3063                 return -EINVAL;
3064
3065         if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3066                 RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
3067                 return -EINVAL;
3068         }
3069
3070         dev_info = &rx_adapter->eth_devices[eth_dev_id];
3071
3072         if (dev_info->rx_queue == NULL ||
3073             !dev_info->rx_queue[rx_queue_id].queue_enabled) {
3074                 RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
3075                 return -EINVAL;
3076         }
3077
3078         queue_info = &dev_info->rx_queue[rx_queue_id];
3079         rxa_queue_stats_reset(queue_info);
3080
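        /* Also reset driver-level per-queue counters when the PMD implements the op */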
3081         dev = &rte_eventdevs[rx_adapter->eventdev_id];
3082         if (dev->dev_ops->eth_rx_adapter_queue_stats_reset != NULL) {
3083                 return (*dev->dev_ops->eth_rx_adapter_queue_stats_reset)(dev,
3084                                                 &rte_eth_devices[eth_dev_id],
3085                                                 rx_queue_id);
3086         }
3087
3088         return 0;
3089 }
3090
3091 int
3092 rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
3093 {
3094         struct event_eth_rx_adapter *rx_adapter;
3095
3096         if (rxa_memzone_lookup())
3097                 return -ENOMEM;
3098
3099         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3100
3101         rx_adapter = rxa_id_to_adapter(id);
3102         if (rx_adapter == NULL || service_id == NULL)
3103                 return -EINVAL;
3104
3105         if (rx_adapter->service_inited)
3106                 *service_id = rx_adapter->service_id;
3107
3108         return rx_adapter->service_inited ? 0 : -ESRCH;
3109 }
3110
3111 int
3112 rte_event_eth_rx_adapter_cb_register(uint8_t id,
3113                                         uint16_t eth_dev_id,
3114                                         rte_event_eth_rx_adapter_cb_fn cb_fn,
3115                                         void *cb_arg)
3116 {
3117         struct event_eth_rx_adapter *rx_adapter;
3118         struct eth_device_info *dev_info;
3119         uint32_t cap;
3120         int ret;
3121
3122         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3123         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
3124
3125         rx_adapter = rxa_id_to_adapter(id);
3126         if (rx_adapter == NULL)
3127                 return -EINVAL;
3128
3129         dev_info = &rx_adapter->eth_devices[eth_dev_id];
3130         if (dev_info->rx_queue == NULL)
3131                 return -EINVAL;
3132
3133         ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
3134                                                 eth_dev_id,
3135                                                 &cap);
3136         if (ret) {
3137                 RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
3138                         " eth port %" PRIu16, id, eth_dev_id);
3139                 return ret;
3140         }
3141
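        /* Rx callbacks run in the SW service path; internal event port devices cannot use them */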
3142         if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
3143                 RTE_EDEV_LOG_ERR("Rx callback not supported for eth port %"
3144                                 PRIu16, eth_dev_id);
3145                 return -EINVAL;
3146         }
3147
3148         rte_spinlock_lock(&rx_adapter->rx_lock);
3149         dev_info->cb_fn = cb_fn;
3150         dev_info->cb_arg = cb_arg;
3151         rte_spinlock_unlock(&rx_adapter->rx_lock);
3152
3153         return 0;
3154 }
3155
3156 int
3157 rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
3158                         uint16_t eth_dev_id,
3159                         uint16_t rx_queue_id,
3160                         struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
3161 {
3162         struct rte_eventdev *dev;
3163         struct event_eth_rx_adapter *rx_adapter;
3164         struct eth_device_info *dev_info;
3165         struct eth_rx_queue_info *queue_info;
3166         struct rte_event *qi_ev;
3167         int ret;
3168
3169         if (rxa_memzone_lookup())
3170                 return -ENOMEM;
3171
3172         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3173         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
3174
3175         if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3176                 RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
3177                 return -EINVAL;
3178         }
3179
3180         if (queue_conf == NULL) {
3181                 RTE_EDEV_LOG_ERR("Rx queue conf struct cannot be NULL");
3182                 return -EINVAL;
3183         }
3184
3185         rx_adapter = rxa_id_to_adapter(id);
3186         if (rx_adapter == NULL)
3187                 return -EINVAL;
3188
3189         dev_info = &rx_adapter->eth_devices[eth_dev_id];
3190         if (dev_info->rx_queue == NULL ||
3191             !dev_info->rx_queue[rx_queue_id].queue_enabled) {
3192                 RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
3193                 return -EINVAL;
3194         }
3195
3196         queue_info = &dev_info->rx_queue[rx_queue_id];
3197         qi_ev = (struct rte_event *)&queue_info->event;
3198
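        /* Rebuild the queue configuration from the per-queue state stored in the adapter */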
3199         memset(queue_conf, 0, sizeof(*queue_conf));
3200         queue_conf->rx_queue_flags = 0;
3201         if (queue_info->flow_id_mask != 0)
3202                 queue_conf->rx_queue_flags |=
3203                         RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
3204         queue_conf->servicing_weight = queue_info->wt;
3205
3206         memcpy(&queue_conf->ev, qi_ev, sizeof(*qi_ev));
3207
3208         dev = &rte_eventdevs[rx_adapter->eventdev_id];
3209         if (dev->dev_ops->eth_rx_adapter_queue_conf_get != NULL) {
3210                 ret = (*dev->dev_ops->eth_rx_adapter_queue_conf_get)(dev,
3211                                                 &rte_eth_devices[eth_dev_id],
3212                                                 rx_queue_id,
3213                                                 queue_conf);
3214                 return ret;
3215         }
3216
3217         return 0;
3218 }
3219
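/* Add one uint64 statistics field to a telemetry dictionary under its field name */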
3220 #define RXA_ADD_DICT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
3221
3222 static int
3223 handle_rxa_stats(const char *cmd __rte_unused,
3224                  const char *params,
3225                  struct rte_tel_data *d)
3226 {
3227         uint8_t rx_adapter_id;
3228         struct rte_event_eth_rx_adapter_stats rx_adptr_stats;
3229
3230         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3231                 return -1;
3232
3233         /* Get Rx adapter ID from parameter string */
3234         rx_adapter_id = atoi(params);
3235         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
3236
3237         /* Get Rx adapter stats */
3238         if (rte_event_eth_rx_adapter_stats_get(rx_adapter_id,
3239                                                &rx_adptr_stats)) {
3240                 RTE_EDEV_LOG_ERR("Failed to get Rx adapter stats\n");
3241                 return -1;
3242         }
3243
3244         rte_tel_data_start_dict(d);
3245         rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
3246         RXA_ADD_DICT(rx_adptr_stats, rx_packets);
3247         RXA_ADD_DICT(rx_adptr_stats, rx_poll_count);
3248         RXA_ADD_DICT(rx_adptr_stats, rx_dropped);
3249         RXA_ADD_DICT(rx_adptr_stats, rx_enq_retry);
3250         RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_count);
3251         RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_size);
3252         RXA_ADD_DICT(rx_adptr_stats, rx_enq_count);
3253         RXA_ADD_DICT(rx_adptr_stats, rx_enq_start_ts);
3254         RXA_ADD_DICT(rx_adptr_stats, rx_enq_block_cycles);
3255         RXA_ADD_DICT(rx_adptr_stats, rx_enq_end_ts);
3256         RXA_ADD_DICT(rx_adptr_stats, rx_intr_packets);
3257
3258         return 0;
3259 }
3260
3261 static int
3262 handle_rxa_stats_reset(const char *cmd __rte_unused,
3263                        const char *params,
3264                        struct rte_tel_data *d __rte_unused)
3265 {
3266         uint8_t rx_adapter_id;
3267
3268         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3269                 return -1;
3270
3271         /* Get Rx adapter ID from parameter string */
3272         rx_adapter_id = atoi(params);
3273         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
3274
3275         /* Reset Rx adapter stats */
3276         if (rte_event_eth_rx_adapter_stats_reset(rx_adapter_id)) {
3277                 RTE_EDEV_LOG_ERR("Failed to reset Rx adapter stats\n");
3278                 return -1;
3279         }
3280
3281         return 0;
3282 }
3283
3284 static int
3285 handle_rxa_get_queue_conf(const char *cmd __rte_unused,
3286                           const char *params,
3287                           struct rte_tel_data *d)
3288 {
3289         uint8_t rx_adapter_id;
3290         uint16_t rx_queue_id;
3291         int eth_dev_id;
3292         char *token, *l_params;
3293         struct rte_event_eth_rx_adapter_queue_conf queue_conf;
3294
3295         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3296                 return -1;
3297
3298         /* Get Rx adapter ID from parameter string */
3299         l_params = strdup(params);
             if (l_params == NULL)
                     return -1;
3300         token = strtok(l_params, ",");
3301         rx_adapter_id = strtoul(token, NULL, 10);
3302         RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
3303
3304         token = strtok(NULL, ",");
3305         if (token == NULL || strlen(token) == 0 || !isdigit(*token)) {
                     free(l_params);
3306                 return -1;
             }
3307
3308         /* Get device ID from parameter string */
3309         eth_dev_id = strtoul(token, NULL, 10);
3310         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
3311
3312         token = strtok(NULL, ",");
3313         if (token == NULL || strlen(token) == 0 || !isdigit(*token)) {
                     free(l_params);
3314                 return -1;
             }
3315
3316         /* Get Rx queue ID from parameter string */
3317         rx_queue_id = strtoul(token, NULL, 10);
3318         if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3319                 RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
                     free(l_params);
3320                 return -EINVAL;
3321         }
3322
3323         token = strtok(NULL, "\0");
3324         if (token != NULL)
3325                 RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
3326                                  " telemetry command, ignoring");
             free(l_params);
3327
3328         if (rte_event_eth_rx_adapter_queue_conf_get(rx_adapter_id, eth_dev_id,
3329                                                     rx_queue_id, &queue_conf)) {
3330                 RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue config");
3331                 return -1;
3332         }
3333
3334         rte_tel_data_start_dict(d);
3335         rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
3336         rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id);
3337         rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id);
3338         RXA_ADD_DICT(queue_conf, rx_queue_flags);
3339         RXA_ADD_DICT(queue_conf, servicing_weight);
3340         RXA_ADD_DICT(queue_conf.ev, queue_id);
3341         RXA_ADD_DICT(queue_conf.ev, sched_type);
3342         RXA_ADD_DICT(queue_conf.ev, priority);
3343         RXA_ADD_DICT(queue_conf.ev, flow_id);
3344
3345         return 0;
3346 }
3347
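/*
 * Telemetry usage sketch (illustrative, e.g. via usertools/dpdk-telemetry.py):
 *   --> /eventdev/rxa_stats,0
 *   --> /eventdev/rxa_stats_reset,0
 *   --> /eventdev/rxa_queue_conf,0,0,0
 */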
3348 RTE_INIT(rxa_init_telemetry)
3349 {
3350         rte_telemetry_register_cmd("/eventdev/rxa_stats",
3351                 handle_rxa_stats,
3352                 "Returns Rx adapter stats. Parameter: rxa_id");
3353
3354         rte_telemetry_register_cmd("/eventdev/rxa_stats_reset",
3355                 handle_rxa_stats_reset,
3356                 "Reset Rx adapter stats. Parameter: rxa_id");
3357
3358         rte_telemetry_register_cmd("/eventdev/rxa_queue_conf",
3359                 handle_rxa_get_queue_conf,
3360                 "Returns Rx queue config. Parameter: rxa_id, dev_id, queue_id");
3361 }