#include <rte_cycles.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_service_component.h>
#include <rte_thash.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
#include "rte_event_eth_rx_adapter.h"

#define BATCH_SIZE			32
#define BLOCK_CNT_THRESHOLD		10
#define ETH_EVENT_BUFFER_SIZE		(4*BATCH_SIZE)

#define ETH_RX_ADAPTER_SERVICE_NAME_LEN	32
#define ETH_RX_ADAPTER_MEM_NAME_LEN	32

#define RSS_KEY_SIZE	40
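
/* The enqueue buffer is sized to hold four batches so that, immediately
 * after a flush, a full rte_eth_rx_burst() of up to BATCH_SIZE mbufs is
 * guaranteed to fit; eth_rx_poll() checks this headroom before each burst.
 */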
/*
 * There is an instance of this struct per polled Rx queue added to the
 * adapter
 */
struct eth_rx_poll_entry {
	/* Eth port to poll */
	uint8_t eth_dev_id;
	/* Eth rx queue to poll */
	uint16_t eth_rx_qid;
};
/* Instance per adapter */
struct rte_eth_event_enqueue_buffer {
	/* Count of events in this buffer */
	uint16_t count;
	/* Array of events in this buffer */
	struct rte_event events[ETH_EVENT_BUFFER_SIZE];
};
struct rte_event_eth_rx_adapter {
	/* RSS key */
	uint8_t rss_key_be[RSS_KEY_SIZE];
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Per ethernet device structure */
	struct eth_device_info *eth_devices;
	/* Event port identifier */
	uint8_t event_port_id;
	/* Lock to serialize config updates with service function */
	rte_spinlock_t rx_lock;
	/* Max mbufs processed in any service function invocation */
	uint32_t max_nb_rx;
	/* Receive queues that need to be polled */
	struct eth_rx_poll_entry *eth_rx_poll;
	/* Size of the eth_rx_poll array */
	uint16_t num_rx_polled;
	/* Weighted round robin schedule */
	uint32_t *wrr_sched;
	/* wrr_sched[] size */
	uint32_t wrr_len;
	/* Next entry in wrr[] to begin polling */
	uint32_t wrr_pos;
	/* Event burst buffer */
	struct rte_eth_event_enqueue_buffer event_enqueue_buffer;
	/* Per adapter stats */
	struct rte_event_eth_rx_adapter_stats stats;
	/* Block count, counts up to BLOCK_CNT_THRESHOLD */
	uint16_t enq_block_count;
	/* Block start timestamp */
	uint64_t rx_enq_block_start_ts;
	/* Configuration callback for rte_service configuration */
	rte_event_eth_rx_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	void *conf_arg;
	/* Set if default_cb is being used */
	int default_cb_arg;
	/* Service initialization state */
	uint8_t service_inited;
	/* Total count of Rx queues in adapter */
	uint32_t nb_queues;
	/* Memory allocation name */
	char mem_name[ETH_RX_ADAPTER_MEM_NAME_LEN];
	/* Socket identifier cached from eventdev */
	int socket_id;
	/* Per adapter EAL service */
	uint32_t service_id;
} __rte_cache_aligned;
/* Per eth device */
struct eth_device_info {
	struct rte_eth_dev *dev;
	struct eth_rx_queue_info *rx_queue;
	/* Set if ethdev->eventdev packet transfer uses a
	 * hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set if the adapter is processing rx queues for
	 * this eth device and packet processing has been
	 * started, allows for the code to know if the PMD
	 * rx_adapter_stop callback needs to be invoked
	 */
	uint8_t dev_rx_started;
	/* If nb_dev_queues > 0, the start callback will
	 * be invoked if not already invoked
	 */
	uint16_t nb_dev_queues;
};
struct eth_rx_queue_info {
	int queue_enabled;	/* True if added */
	uint16_t wt;		/* Polling weight */
	uint8_t event_queue_id;	/* Event queue to enqueue packets to */
	uint8_t sched_type;	/* Sched type for events */
	uint8_t priority;	/* Event priority */
	uint32_t flow_id;	/* App provided flow identifier */
	uint32_t flow_id_mask;	/* Set to ~0 if app provides flow id else 0 */
};
static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;

static inline int
valid_id(uint8_t id)
{
	return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
}
#define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
	if (!valid_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
		return retval; \
	} \
} while (0)
static inline int
sw_rx_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
{
	return rx_adapter->num_rx_polled;
}
/* Greatest common divisor */
static uint16_t gcd_u16(uint16_t a, uint16_t b)
{
	uint16_t r = a % b;

	return r ? gcd_u16(b, r) : b;
}
/* Returns the next queue in the polling sequence
 *
 * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
 */
static int
wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
	 unsigned int n, int *cw,
	 struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
	 uint16_t gcd, int prev)
{
	int i = prev;
	uint16_t w;

	while (1) {
		uint16_t q;
		uint8_t d;

		i = (i + 1) % n;
		if (i == 0) {
			*cw = *cw - gcd;
			if (*cw <= 0)
				*cw = max_wt;
		}

		q = eth_rx_poll[i].eth_rx_qid;
		d = eth_rx_poll[i].eth_dev_id;
		w = rx_adapter->eth_devices[d].rx_queue[q].wt;

		if ((int)w >= *cw)
			return i;
	}
}
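
/* Over one complete schedule of max_wrr_pos slots, wrr_next() selects each
 * queue wt times and favors heavier queues while the current weight *cw is
 * high; e.g. weights of 4, 2 and 1 yield the 7 slot cycle
 * {0, 0, 0, 1, 0, 1, 2}.
 */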
/* Precalculate WRR polling sequence for all queues in rx_adapter */
static int
eth_poll_wrr_calc(struct rte_event_eth_rx_adapter *rx_adapter)
{
	uint16_t d;
	uint16_t q;
	unsigned int i;

	/* Initialize variables for calculation of wrr schedule */
	uint16_t max_wrr_pos = 0;
	unsigned int poll_q = 0;
	uint16_t max_wt = 0;
	uint16_t gcd = 0;

	struct eth_rx_poll_entry *rx_poll = NULL;
	uint32_t *rx_wrr = NULL;

	if (rx_adapter->num_rx_polled) {
		size_t len = RTE_ALIGN(rx_adapter->num_rx_polled *
				sizeof(*rx_adapter->eth_rx_poll),
				RTE_CACHE_LINE_SIZE);
		rx_poll = rte_zmalloc_socket(rx_adapter->mem_name,
					     len,
					     RTE_CACHE_LINE_SIZE,
					     rx_adapter->socket_id);
		if (rx_poll == NULL)
			return -ENOMEM;

		/* Generate array of all queues to poll, the size of this
		 * array is poll_q
		 */
		RTE_ETH_FOREACH_DEV(d) {
			uint16_t nb_rx_queues;
			struct eth_device_info *dev_info =
					&rx_adapter->eth_devices[d];
			nb_rx_queues = dev_info->dev->data->nb_rx_queues;
			if (dev_info->rx_queue == NULL)
				continue;
			for (q = 0; q < nb_rx_queues; q++) {
				struct eth_rx_queue_info *queue_info =
					&dev_info->rx_queue[q];
				if (queue_info->queue_enabled == 0)
					continue;

				uint16_t wt = queue_info->wt;
				rx_poll[poll_q].eth_dev_id = d;
				rx_poll[poll_q].eth_rx_qid = q;
				max_wrr_pos += wt;
				max_wt = RTE_MAX(max_wt, wt);
				gcd = (gcd) ? gcd_u16(gcd, wt) : wt;
				poll_q++;
			}
		}

		len = RTE_ALIGN(max_wrr_pos * sizeof(*rx_wrr),
				RTE_CACHE_LINE_SIZE);
		rx_wrr = rte_zmalloc_socket(rx_adapter->mem_name,
					    len,
					    RTE_CACHE_LINE_SIZE,
					    rx_adapter->socket_id);
		if (rx_wrr == NULL) {
			rte_free(rx_poll);
			return -ENOMEM;
		}

		/* Generate polling sequence based on weights */
		int prev = -1;
		int cw = -1;
		for (i = 0; i < max_wrr_pos; i++) {
			rx_wrr[i] = wrr_next(rx_adapter, poll_q, &cw,
					     rx_poll, max_wt, gcd, prev);
			prev = rx_wrr[i];
		}
	}

	rte_free(rx_adapter->eth_rx_poll);
	rte_free(rx_adapter->wrr_sched);

	rx_adapter->eth_rx_poll = rx_poll;
	rx_adapter->wrr_sched = rx_wrr;
	rx_adapter->wrr_len = max_wrr_pos;

	return 0;
}
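
/* Callers serialize eth_poll_wrr_calc() with rx_adapter->rx_lock, so the
 * poll and schedule arrays can be freed and swapped above without racing
 * the service function in eth_rx_poll().
 */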
static inline void
mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,
	struct ipv6_hdr **ipv6_hdr)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
	struct vlan_hdr *vlan_hdr;

	*ipv4_hdr = NULL;
	*ipv6_hdr = NULL;

	switch (eth_hdr->ether_type) {
	case RTE_BE16(ETHER_TYPE_IPv4):
		*ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
		break;

	case RTE_BE16(ETHER_TYPE_IPv6):
		*ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
		break;

	case RTE_BE16(ETHER_TYPE_VLAN):
		vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
		switch (vlan_hdr->eth_proto) {
		case RTE_BE16(ETHER_TYPE_IPv4):
			*ipv4_hdr = (struct ipv4_hdr *)(vlan_hdr + 1);
			break;
		case RTE_BE16(ETHER_TYPE_IPv6):
			*ipv6_hdr = (struct ipv6_hdr *)(vlan_hdr + 1);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}
}
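
/* Only untagged and single VLAN tagged frames are parsed; for other
 * encapsulations both header pointers are left NULL.
 */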
/* Calculate RSS hash for IPv4/6 */
static inline uint32_t
do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
{
	uint32_t input_len;
	void *tuple;
	struct rte_ipv4_tuple ipv4_tuple;
	struct rte_ipv6_tuple ipv6_tuple;
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;

	mtoip(m, &ipv4_hdr, &ipv6_hdr);

	if (ipv4_hdr) {
		ipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);
		ipv4_tuple.dst_addr = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
		tuple = &ipv4_tuple;
		input_len = RTE_THASH_V4_L3_LEN;
	} else if (ipv6_hdr) {
		rte_thash_load_v6_addrs(ipv6_hdr,
					(union rte_thash_tuple *)&ipv6_tuple);
		tuple = &ipv6_tuple;
		input_len = RTE_THASH_V6_L3_LEN;
	} else
		return 0;

	return rte_softrss_be(tuple, input_len, rss_key_be);
}
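
/* Non-IP packets hash to 0, so in the absence of an application supplied
 * flow id (see fill_event_buffer() below) all such packets share the same
 * event flow id.
 */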
static inline int
rx_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
{
	return !!rx_adapter->enq_block_count;
}
static inline void
rx_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
{
	if (rx_adapter->rx_enq_block_start_ts)
		return;

	rx_adapter->enq_block_count++;
	if (rx_adapter->enq_block_count < BLOCK_CNT_THRESHOLD)
		return;

	rx_adapter->rx_enq_block_start_ts = rte_get_tsc_cycles();
}
static inline void
rx_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
		    struct rte_event_eth_rx_adapter_stats *stats)
{
	if (unlikely(!stats->rx_enq_start_ts))
		stats->rx_enq_start_ts = rte_get_tsc_cycles();

	if (likely(!rx_enq_blocked(rx_adapter)))
		return;

	rx_adapter->enq_block_count = 0;
	if (rx_adapter->rx_enq_block_start_ts) {
		stats->rx_enq_end_ts = rte_get_tsc_cycles();
		stats->rx_enq_block_cycles += stats->rx_enq_end_ts -
		    rx_adapter->rx_enq_block_start_ts;
		rx_adapter->rx_enq_block_start_ts = 0;
	}
}
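
/* The two helpers above begin timing a blocked enqueue only after
 * BLOCK_CNT_THRESHOLD consecutive zero-event flushes, filtering brief
 * backpressure out of the rx_enq_block_cycles statistic.
 */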
/* Add event to buffer, free space check is done prior to calling
 * this function
 */
static inline void
buf_event_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
		  struct rte_event *ev)
{
	struct rte_eth_event_enqueue_buffer *buf =
	    &rx_adapter->event_enqueue_buffer;
	rte_memcpy(&buf->events[buf->count++], ev, sizeof(struct rte_event));
}
/* Enqueue buffered events to event device */
static inline uint16_t
flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
{
	struct rte_eth_event_enqueue_buffer *buf =
	    &rx_adapter->event_enqueue_buffer;
	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;

	uint16_t n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
					rx_adapter->event_port_id,
					buf->events,
					buf->count);
	if (n != buf->count) {
		memmove(buf->events,
			&buf->events[n],
			(buf->count - n) * sizeof(struct rte_event));
		stats->rx_enq_retry++;
	}

	n ? rx_enq_block_end_ts(rx_adapter, stats) :
		rx_enq_block_start_ts(rx_adapter);

	buf->count -= n;
	stats->rx_enq_count += n;

	return n;
}
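
/* Events that were not accepted are moved to the front of the buffer and
 * retried on the next flush; rte_event_enqueue_new_burst() is appropriate
 * here since every buffered event is RTE_EVENT_OP_NEW.
 */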
static inline void
fill_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
	uint8_t dev_id,
	uint16_t rx_queue_id,
	struct rte_mbuf **mbufs,
	uint16_t num)
{
	uint32_t i;
	struct eth_device_info *eth_device_info =
					&rx_adapter->eth_devices[dev_id];
	struct eth_rx_queue_info *eth_rx_queue_info =
				&eth_device_info->rx_queue[rx_queue_id];

	int32_t qid = eth_rx_queue_info->event_queue_id;
	uint8_t sched_type = eth_rx_queue_info->sched_type;
	uint8_t priority = eth_rx_queue_info->priority;
	uint32_t flow_id;
	struct rte_event events[BATCH_SIZE];
	struct rte_mbuf *m = mbufs[0];
	uint32_t rss_mask;
	uint32_t rss;
	int do_rss;
	uint64_t ts;

	/* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */
	rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);
	do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;

	if ((m->ol_flags & PKT_RX_TIMESTAMP) == 0) {
		ts = rte_get_tsc_cycles();
		for (i = 0; i < num; i++) {
			m = mbufs[i];

			m->timestamp = ts;
			m->ol_flags |= PKT_RX_TIMESTAMP;
		}
	}

	for (i = 0; i < num; i++) {
		m = mbufs[i];
		struct rte_event *ev = &events[i];

		rss = do_rss ?
			do_softrss(m, rx_adapter->rss_key_be) : m->hash.rss;
		flow_id =
			eth_rx_queue_info->flow_id &
				eth_rx_queue_info->flow_id_mask;
		flow_id |= rss & ~eth_rx_queue_info->flow_id_mask;
		ev->flow_id = flow_id;
		ev->op = RTE_EVENT_OP_NEW;
		ev->sched_type = sched_type;
		ev->queue_id = qid;
		ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
		ev->sub_event_type = 0;
		ev->priority = priority;
		ev->mbuf = m;

		buf_event_enqueue(rx_adapter, ev);
	}
}
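
/* The resulting event flow id is the application supplied flow id if the
 * queue was added with RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID,
 * otherwise the NIC computed RSS hash, falling back to software RSS when
 * the PMD did not set PKT_RX_RSS_HASH.
 */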
/*
 * Polls receive queues added to the event adapter and enqueues received
 * packets to the event device.
 *
 * The receive code enqueues initially to a temporary buffer; the temporary
 * buffer is drained any time it holds >= BATCH_SIZE packets.
 *
 * If there isn't space available in the temporary buffer, packets from the
 * Rx queue aren't dequeued from the eth device; this back pressures the
 * eth device, and in virtual device environments this back pressure is
 * relayed to the hypervisor's switching layer where adjustments can be
 * made to deal with it.
 */
static inline uint32_t
eth_rx_poll(struct rte_event_eth_rx_adapter *rx_adapter)
{
	uint32_t num_queue;
	uint16_t n;
	uint32_t nb_rx = 0;
	struct rte_mbuf *mbufs[BATCH_SIZE];
	struct rte_eth_event_enqueue_buffer *buf;
	uint32_t wrr_pos;
	uint32_t max_nb_rx;

	wrr_pos = rx_adapter->wrr_pos;
	max_nb_rx = rx_adapter->max_nb_rx;
	buf = &rx_adapter->event_enqueue_buffer;
	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;

	/* Iterate through a WRR sequence */
	for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
		unsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];
		uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
		uint8_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;

		/* Don't do a batch dequeue from the rx queue if there isn't
		 * enough space in the enqueue buffer.
		 */
		if (buf->count >= BATCH_SIZE)
			flush_event_buffer(rx_adapter);
		if (BATCH_SIZE > (ETH_EVENT_BUFFER_SIZE - buf->count))
			break;

		stats->rx_poll_count++;
		n = rte_eth_rx_burst(d, qid, mbufs, BATCH_SIZE);

		if (n) {
			stats->rx_packets += n;
			/* The check before rte_eth_rx_burst() ensures that
			 * all n mbufs can be buffered
			 */
			fill_event_buffer(rx_adapter, d, qid, mbufs, n);
			nb_rx += n;
			if (nb_rx > max_nb_rx) {
				rx_adapter->wrr_pos =
				    (wrr_pos + 1) % rx_adapter->wrr_len;
				return nb_rx;
			}
		}

		if (++wrr_pos == rx_adapter->wrr_len)
			wrr_pos = 0;
	}

	return nb_rx;
}
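
/* Note that nb_rx is compared against max_nb_rx only after a full burst
 * has been buffered, so one invocation may exceed max_nb_rx by up to
 * BATCH_SIZE - 1 packets.
 */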
static int
event_eth_rx_adapter_service_func(void *args)
{
	struct rte_event_eth_rx_adapter *rx_adapter = args;
	struct rte_eth_event_enqueue_buffer *buf;

	buf = &rx_adapter->event_enqueue_buffer;
	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
		return 0;
	if (eth_rx_poll(rx_adapter) == 0 && buf->count)
		flush_event_buffer(rx_adapter);
	rte_spinlock_unlock(&rx_adapter->rx_lock);
	return 0;
}
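
/* The trylock above keeps the service function from blocking behind a
 * control thread performing a queue add or delete; the service instead
 * skips one iteration while the configuration update is in progress.
 */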
static int
rte_event_eth_rx_adapter_init(void)
{
	const char *name = "rte_event_eth_rx_adapter_array";
	const struct rte_memzone *mz;
	unsigned int sz;

	sz = sizeof(*event_eth_rx_adapter) *
	    RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
						 RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
					PRId32, rte_errno);
			return -rte_errno;
		}
	}

	event_eth_rx_adapter = mz->addr;
	return 0;
}
static inline struct rte_event_eth_rx_adapter *
id_to_rx_adapter(uint8_t id)
{
	return event_eth_rx_adapter ?
		event_eth_rx_adapter[id] : NULL;
}
static int
default_conf_cb(uint8_t id, uint8_t dev_id,
		struct rte_event_eth_rx_adapter_conf *conf, void *arg)
{
	int ret;
	struct rte_eventdev *dev;
	struct rte_event_dev_config dev_conf;
	int started;
	uint8_t port_id;
	struct rte_event_port_conf *port_conf = arg;
	struct rte_event_eth_rx_adapter *rx_adapter = id_to_rx_adapter(id);

	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);
	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;
	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to configure event dev %u\n",
						dev_id);
		if (started)
			if (rte_event_dev_start(dev_id))
				return -EIO;

		return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
					port_id);
		return ret;
	}

	conf->event_port_id = port_id;
	conf->max_nb_rx = 128;
	if (started)
		ret = rte_event_dev_start(dev_id);
	rx_adapter->default_cb_arg = 1;
	return ret;
}
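
/* Note: the default callback reconfigures the event device with one extra
 * event port for the adapter's use, stopping and restarting the device if
 * it was already running; applications that cannot tolerate this restart
 * should supply their own callback via
 * rte_event_eth_rx_adapter_create_ext().
 */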
static int
init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
{
	int ret;
	struct rte_service_spec service;
	struct rte_event_eth_rx_adapter_conf rx_adapter_conf;

	if (rx_adapter->service_inited)
		return 0;

	memset(&service, 0, sizeof(service));
	snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN,
		"rte_event_eth_rx_adapter_%d", id);
	service.socket_id = rx_adapter->socket_id;
	service.callback = event_eth_rx_adapter_service_func;
	service.callback_userdata = rx_adapter;
	/* Service function handles locking for queue add/del updates */
	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
	ret = rte_service_component_register(&service, &rx_adapter->service_id);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
			service.name, ret);
		return ret;
	}

	ret = rx_adapter->conf_cb(id, rx_adapter->eventdev_id,
		&rx_adapter_conf, rx_adapter->conf_arg);
	if (ret) {
		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
			ret);
		goto err_done;
	}
	rx_adapter->event_port_id = rx_adapter_conf.event_port_id;
	rx_adapter->max_nb_rx = rx_adapter_conf.max_nb_rx;
	rx_adapter->service_inited = 1;
	return 0;

err_done:
	rte_service_component_unregister(rx_adapter->service_id);
	return ret;
}
static void
update_queue_info(struct rte_event_eth_rx_adapter *rx_adapter,
		struct eth_device_info *dev_info,
		int32_t rx_queue_id,
		uint8_t add)
{
	struct eth_rx_queue_info *queue_info;
	int enabled;
	uint16_t i;

	if (dev_info->rx_queue == NULL)
		return;

	if (rx_queue_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
			update_queue_info(rx_adapter, dev_info, i, add);
	} else {
		queue_info = &dev_info->rx_queue[rx_queue_id];
		enabled = queue_info->queue_enabled;
		if (add) {
			rx_adapter->nb_queues += !enabled;
			dev_info->nb_dev_queues += !enabled;
		} else {
			rx_adapter->nb_queues -= enabled;
			dev_info->nb_dev_queues -= enabled;
		}
		queue_info->queue_enabled = !!add;
	}
}
static int
event_eth_rx_adapter_queue_del(struct rte_event_eth_rx_adapter *rx_adapter,
			    struct eth_device_info *dev_info,
			    uint16_t rx_queue_id)
{
	struct eth_rx_queue_info *queue_info;

	if (rx_adapter->nb_queues == 0)
		return 0;

	queue_info = &dev_info->rx_queue[rx_queue_id];
	rx_adapter->num_rx_polled -= queue_info->queue_enabled;
	update_queue_info(rx_adapter, dev_info, rx_queue_id, 0);
	return 0;
}
static void
event_eth_rx_adapter_queue_add(struct rte_event_eth_rx_adapter *rx_adapter,
		struct eth_device_info *dev_info,
		uint16_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *conf)
{
	struct eth_rx_queue_info *queue_info;
	const struct rte_event *ev = &conf->ev;

	queue_info = &dev_info->rx_queue[rx_queue_id];
	queue_info->event_queue_id = ev->queue_id;
	queue_info->sched_type = ev->sched_type;
	queue_info->priority = ev->priority;
	queue_info->wt = conf->servicing_weight;

	if (conf->rx_queue_flags &
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {
		queue_info->flow_id = ev->flow_id;
		queue_info->flow_id_mask = ~0;
	}

	/* The same queue can be added more than once */
	rx_adapter->num_rx_polled += !queue_info->queue_enabled;
	update_queue_info(rx_adapter, dev_info, rx_queue_id, 1);
}
static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,
		uint8_t eth_dev_id,
		int rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
	struct rte_event_eth_rx_adapter_queue_conf temp_conf;
	uint32_t i;
	int ret;

	if (queue_conf->servicing_weight == 0) {

		struct rte_eth_dev_data *data = dev_info->dev->data;
		if (data->dev_conf.intr_conf.rxq) {
			RTE_EDEV_LOG_ERR("Interrupt driven queues"
					" not supported");
			return -ENOTSUP;
		}
		temp_conf = *queue_conf;

		/* If Rx interrupts are disabled set wt = 1 */
		temp_conf.servicing_weight = 1;
		queue_conf = &temp_conf;
	}

	if (dev_info->rx_queue == NULL) {
		dev_info->rx_queue =
		    rte_zmalloc_socket(rx_adapter->mem_name,
				       dev_info->dev->data->nb_rx_queues *
				       sizeof(struct eth_rx_queue_info), 0,
				       rx_adapter->socket_id);
		if (dev_info->rx_queue == NULL)
			return -ENOMEM;
	}

	if (rx_queue_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
			event_eth_rx_adapter_queue_add(rx_adapter,
						dev_info, i,
						queue_conf);
	} else
		event_eth_rx_adapter_queue_add(rx_adapter, dev_info,
					(uint16_t)rx_queue_id,
					queue_conf);

	ret = eth_poll_wrr_calc(rx_adapter);
	if (ret) {
		event_eth_rx_adapter_queue_del(rx_adapter,
					dev_info, rx_queue_id);
		return ret;
	}

	return ret;
}
static int
rx_adapter_ctrl(uint8_t id, int start)
{
	struct rte_event_eth_rx_adapter *rx_adapter;
	struct rte_eventdev *dev;
	struct eth_device_info *dev_info;
	uint32_t i;
	int use_service = 0;
	int stop = !start;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	rx_adapter = id_to_rx_adapter(id);
	if (rx_adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[rx_adapter->eventdev_id];

	RTE_ETH_FOREACH_DEV(i) {
		dev_info = &rx_adapter->eth_devices[i];
		/* if start check for num dev queues */
		if (start && !dev_info->nb_dev_queues)
			continue;
		/* if stop check if dev has been started */
		if (stop && !dev_info->dev_rx_started)
			continue;
		use_service |= !dev_info->internal_event_port;
		dev_info->dev_rx_started = start;
		if (dev_info->internal_event_port == 0)
			continue;
		start ? (*dev->dev_ops->eth_rx_adapter_start)(dev,
						&rte_eth_devices[i]) :
			(*dev->dev_ops->eth_rx_adapter_stop)(dev,
						&rte_eth_devices[i]);
	}

	if (use_service)
		rte_service_runstate_set(rx_adapter->service_id, start);

	return 0;
}
int
rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
				rte_event_eth_rx_adapter_conf_cb conf_cb,
				void *conf_arg)
{
	struct rte_event_eth_rx_adapter *rx_adapter;
	int ret;
	int socket_id;
	uint16_t i;
	char mem_name[ETH_RX_ADAPTER_MEM_NAME_LEN];
	const uint8_t default_rss_key[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
	};

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (conf_cb == NULL)
		return -EINVAL;

	if (event_eth_rx_adapter == NULL) {
		ret = rte_event_eth_rx_adapter_init();
		if (ret)
			return ret;
	}

	rx_adapter = id_to_rx_adapter(id);
	if (rx_adapter != NULL) {
		RTE_EDEV_LOG_ERR("Eth Rx adapter exists id = %" PRIu8, id);
		return -EEXIST;
	}

	socket_id = rte_event_dev_socket_id(dev_id);
	snprintf(mem_name, ETH_RX_ADAPTER_MEM_NAME_LEN,
		"rte_event_eth_rx_adapter_%d",
		id);

	rx_adapter = rte_zmalloc_socket(mem_name, sizeof(*rx_adapter),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (rx_adapter == NULL) {
		RTE_EDEV_LOG_ERR("failed to get mem for rx adapter");
		return -ENOMEM;
	}

	rx_adapter->eventdev_id = dev_id;
	rx_adapter->socket_id = socket_id;
	rx_adapter->conf_cb = conf_cb;
	rx_adapter->conf_arg = conf_arg;
	strcpy(rx_adapter->mem_name, mem_name);
	rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
					rte_eth_dev_count() *
					sizeof(struct eth_device_info), 0,
					socket_id);
	rte_convert_rss_key((const uint32_t *)default_rss_key,
			(uint32_t *)rx_adapter->rss_key_be,
			    RTE_DIM(default_rss_key));

	if (rx_adapter->eth_devices == NULL) {
		RTE_EDEV_LOG_ERR("failed to get mem for eth devices\n");
		rte_free(rx_adapter);
		return -ENOMEM;
	}
	rte_spinlock_init(&rx_adapter->rx_lock);
	RTE_ETH_FOREACH_DEV(i)
		rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];

	event_eth_rx_adapter[id] = rx_adapter;
	if (conf_cb == default_conf_cb)
		rx_adapter->default_cb_arg = 1;
	return 0;
}
int
rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
		struct rte_event_port_conf *port_config)
{
	struct rte_event_port_conf *pc;
	int ret;

	if (port_config == NULL)
		return -EINVAL;
	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	pc = rte_malloc(NULL, sizeof(*pc), 0);
	if (pc == NULL)
		return -ENOMEM;
	*pc = *port_config;
	ret = rte_event_eth_rx_adapter_create_ext(id, dev_id,
					default_conf_cb,
					pc);
	if (ret)
		rte_free(pc);
	return ret;
}
int
rte_event_eth_rx_adapter_free(uint8_t id)
{
	struct rte_event_eth_rx_adapter *rx_adapter;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	rx_adapter = id_to_rx_adapter(id);
	if (rx_adapter == NULL)
		return -EINVAL;

	if (rx_adapter->nb_queues) {
		RTE_EDEV_LOG_ERR("%" PRIu16 " Rx queues not deleted",
				rx_adapter->nb_queues);
		return -EBUSY;
	}

	if (rx_adapter->default_cb_arg)
		rte_free(rx_adapter->conf_arg);
	rte_free(rx_adapter->eth_devices);
	rte_free(rx_adapter);
	event_eth_rx_adapter[id] = NULL;

	return 0;
}
int
rte_event_eth_rx_adapter_queue_add(uint8_t id,
		uint8_t eth_dev_id,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	int ret;
	uint32_t cap;
	struct rte_event_eth_rx_adapter *rx_adapter;
	struct rte_eventdev *dev;
	struct eth_device_info *dev_info;
	int start_service;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

	rx_adapter = id_to_rx_adapter(id);
	if ((rx_adapter == NULL) || (queue_conf == NULL))
		return -EINVAL;

	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
						eth_dev_id,
						&cap);
	if (ret) {
		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
			" eth port %" PRIu8, id, eth_dev_id);
		return ret;
	}

	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) == 0
		&& (queue_conf->rx_queue_flags &
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
		RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
				" eth port: %" PRIu8 " adapter id: %" PRIu8,
				eth_dev_id, id);
		return -EINVAL;
	}

	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
		(rx_queue_id != -1)) {
		RTE_EDEV_LOG_ERR("Rx queues can only be connected to single "
			"event queue id %u eth port %u", id, eth_dev_id);
		return -EINVAL;
	}

	if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
			rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
			 (uint16_t)rx_queue_id);
		return -EINVAL;
	}

	start_service = 0;
	dev_info = &rx_adapter->eth_devices[eth_dev_id];

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_add,
					-ENOTSUP);
		if (dev_info->rx_queue == NULL) {
			dev_info->rx_queue =
			    rte_zmalloc_socket(rx_adapter->mem_name,
					dev_info->dev->data->nb_rx_queues *
					sizeof(struct eth_rx_queue_info), 0,
					rx_adapter->socket_id);
			if (dev_info->rx_queue == NULL)
				return -ENOMEM;
		}

		ret = (*dev->dev_ops->eth_rx_adapter_queue_add)(dev,
				&rte_eth_devices[eth_dev_id],
				rx_queue_id, queue_conf);
		if (ret == 0) {
			dev_info->internal_event_port = 1;
			update_queue_info(rx_adapter,
					&rx_adapter->eth_devices[eth_dev_id],
					rx_queue_id,
					1);
		}
	} else {
		rte_spinlock_lock(&rx_adapter->rx_lock);
		dev_info->internal_event_port = 0;
		ret = init_service(rx_adapter, id);
		if (ret == 0)
			ret = add_rx_queue(rx_adapter, eth_dev_id, rx_queue_id,
					queue_conf);
		rte_spinlock_unlock(&rx_adapter->rx_lock);
		if (ret == 0)
			start_service = !!sw_rx_adapter_queue_count(rx_adapter);
	}

	if (ret)
		return ret;

	if (start_service)
		rte_service_component_runstate_set(rx_adapter->service_id, 1);

	return 0;
}
int
rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
				int32_t rx_queue_id)
{
	int ret = 0;
	struct rte_eventdev *dev;
	struct rte_event_eth_rx_adapter *rx_adapter;
	struct eth_device_info *dev_info;
	uint32_t cap;
	uint16_t i;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

	rx_adapter = id_to_rx_adapter(id);
	if (rx_adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
						eth_dev_id,
						&cap);
	if (ret)
		return ret;

	if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
		rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
			 (uint16_t)rx_queue_id);
		return -EINVAL;
	}

	dev_info = &rx_adapter->eth_devices[eth_dev_id];

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_del,
				 -ENOTSUP);
		ret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev,
						&rte_eth_devices[eth_dev_id],
						rx_queue_id);
		if (ret == 0) {
			update_queue_info(rx_adapter,
					&rx_adapter->eth_devices[eth_dev_id],
					rx_queue_id,
					0);
			if (dev_info->nb_dev_queues == 0) {
				rte_free(dev_info->rx_queue);
				dev_info->rx_queue = NULL;
			}
		}
	} else {
		int rc;
		rte_spinlock_lock(&rx_adapter->rx_lock);
		if (rx_queue_id == -1) {
			for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
				event_eth_rx_adapter_queue_del(rx_adapter,
							dev_info,
							i);
		} else {
			event_eth_rx_adapter_queue_del(rx_adapter,
						dev_info,
						(uint16_t)rx_queue_id);
		}

		rc = eth_poll_wrr_calc(rx_adapter);
		if (rc)
			RTE_EDEV_LOG_ERR("WRR recalculation failed %" PRId32,
					rc);

		if (dev_info->nb_dev_queues == 0) {
			rte_free(dev_info->rx_queue);
			dev_info->rx_queue = NULL;
		}

		rte_spinlock_unlock(&rx_adapter->rx_lock);
		rte_service_component_runstate_set(rx_adapter->service_id,
				sw_rx_adapter_queue_count(rx_adapter));
	}

	return ret;
}
int
rte_event_eth_rx_adapter_start(uint8_t id)
{
	return rx_adapter_ctrl(id, 1);
}

int
rte_event_eth_rx_adapter_stop(uint8_t id)
{
	return rx_adapter_ctrl(id, 0);
}
int
rte_event_eth_rx_adapter_stats_get(uint8_t id,
			       struct rte_event_eth_rx_adapter_stats *stats)
{
	struct rte_event_eth_rx_adapter *rx_adapter;
	struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
	struct rte_event_eth_rx_adapter_stats dev_stats;
	struct rte_eventdev *dev;
	struct eth_device_info *dev_info;
	uint32_t i;
	int ret;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	rx_adapter = id_to_rx_adapter(id);
	if (rx_adapter == NULL || stats == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	memset(stats, 0, sizeof(*stats));
	RTE_ETH_FOREACH_DEV(i) {
		dev_info = &rx_adapter->eth_devices[i];
		if (dev_info->internal_event_port == 0 ||
			dev->dev_ops->eth_rx_adapter_stats_get == NULL)
			continue;
		ret = (*dev->dev_ops->eth_rx_adapter_stats_get)(dev,
						&rte_eth_devices[i],
						&dev_stats);
		if (ret)
			continue;
		dev_stats_sum.rx_packets += dev_stats.rx_packets;
		dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
	}

	if (rx_adapter->service_inited)
		*stats = rx_adapter->stats;

	stats->rx_packets += dev_stats_sum.rx_packets;
	stats->rx_enq_count += dev_stats_sum.rx_enq_count;
	return 0;
}
int
rte_event_eth_rx_adapter_stats_reset(uint8_t id)
{
	struct rte_event_eth_rx_adapter *rx_adapter;
	struct rte_eventdev *dev;
	struct eth_device_info *dev_info;
	uint32_t i;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	rx_adapter = id_to_rx_adapter(id);
	if (rx_adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	RTE_ETH_FOREACH_DEV(i) {
		dev_info = &rx_adapter->eth_devices[i];
		if (dev_info->internal_event_port == 0 ||
			dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
			continue;
		(*dev->dev_ops->eth_rx_adapter_stats_reset)(dev,
							&rte_eth_devices[i]);
	}

	memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
	return 0;
}
int
rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
{
	struct rte_event_eth_rx_adapter *rx_adapter;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	rx_adapter = id_to_rx_adapter(id);
	if (rx_adapter == NULL || service_id == NULL)
		return -EINVAL;

	if (rx_adapter->service_inited)
		*service_id = rx_adapter->service_id;

	return rx_adapter->service_inited ? 0 : -ESRCH;
}
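
/*
 * A minimal (service core based) usage sketch, assuming ethdev port 0, a
 * single event queue 0, adapter id 0 and a previously configured service
 * lcore service_lcore (both names chosen here for illustration); error
 * handling omitted:
 *
 *	struct rte_event_port_conf pconf = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 16,
 *		.enqueue_depth = 16,
 *	};
 *	struct rte_event_eth_rx_adapter_queue_conf qconf = {
 *		.ev.queue_id = 0,
 *		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		.servicing_weight = 1,
 *	};
 *	uint32_t service_id;
 *
 *	rte_event_eth_rx_adapter_create(0, dev_id, &pconf);
 *	rte_event_eth_rx_adapter_queue_add(0, 0, -1, &qconf);
 *	rte_event_eth_rx_adapter_service_id_get(0, &service_id);
 *	rte_service_map_lcore_set(service_id, service_lcore, 1);
 *	rte_event_eth_rx_adapter_start(0);
 */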