/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 * Copyright 2017 Cavium, Inc.
 */

#include "pipeline_common.h"
static __rte_always_inline void
worker_fwd_event(struct rte_event *ev, uint8_t sched)
{
	ev->event_type = RTE_EVENT_TYPE_CPU;
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->sched_type = sched;
}
static __rte_always_inline void
worker_event_enqueue(const uint8_t dev, const uint8_t port,
		struct rte_event *ev)
{
	while (rte_event_enqueue_burst(dev, port, ev, 1) != 1)
		rte_pause();
}
static __rte_always_inline void
worker_event_enqueue_burst(const uint8_t dev, const uint8_t port,
		struct rte_event *ev, const uint16_t nb_rx)
{
	uint16_t enq;

	enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
	while (enq < nb_rx)
		enq += rte_event_enqueue_burst(dev, port,
				ev + enq, nb_rx - enq);
}
static __rte_always_inline void
worker_tx_pkt(const uint8_t dev, const uint8_t port, struct rte_event *ev)
{
	exchange_mac(ev->mbuf);
	rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);
	while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0))
		rte_pause();
}
/* Single stage pipeline workers */
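/*
 * A single stage pipeline worker either transmits the event it dequeued
 * (when its sched_type is already RTE_SCHED_TYPE_ATOMIC, i.e. it sits on
 * the Tx stage) or forwards it with atomic scheduling so that the next
 * dequeue of that event performs the transmit.
 */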
static int
worker_do_tx_single(void *arg)
{
	struct worker_data *data = (struct worker_data *)arg;
	const uint8_t dev = data->dev_id;
	const uint8_t port = data->port_id;
	size_t fwd = 0, received = 0, tx = 0;
	struct rte_event ev;

	while (!fdata->done) {

		if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0)) {
			rte_pause();
			continue;
		}

		received++;

		if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
			worker_tx_pkt(dev, port, &ev);
			tx++;
		} else {
			work();
			ev.queue_id++;
			worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
			worker_event_enqueue(dev, port, &ev);
			fwd++;
		}
	}

	if (!cdata.quiet)
		printf("  worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
				rte_lcore_id(), received, fwd, tx);
	return 0;
}
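/*
 * All-type-queue variant of the loop above: every stage shares one event
 * queue, so forwarding only switches the sched_type and leaves queue_id
 * untouched.
 */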
static int
worker_do_tx_single_atq(void *arg)
{
	struct worker_data *data = (struct worker_data *)arg;
	const uint8_t dev = data->dev_id;
	const uint8_t port = data->port_id;
	size_t fwd = 0, received = 0, tx = 0;
	struct rte_event ev;

	while (!fdata->done) {

		if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0)) {
			rte_pause();
			continue;
		}

		received++;

		if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
			worker_tx_pkt(dev, port, &ev);
			tx++;
		} else {
			work();
			worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
			worker_event_enqueue(dev, port, &ev);
			fwd++;
		}
	}

	if (!cdata.quiet)
		printf("  worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
				rte_lcore_id(), received, fwd, tx);
	return 0;
}
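/*
 * Burst variant: up to BATCH_SIZE events are dequeued at once. Transmitted
 * events are marked RTE_EVENT_OP_RELEASE so the trailing burst enqueue only
 * releases their scheduling contexts, while the remaining events are
 * forwarded to the atomic Tx stage.
 */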
static int
worker_do_tx_single_burst(void *arg)
{
	struct rte_event ev[BATCH_SIZE + 1];

	struct worker_data *data = (struct worker_data *)arg;
	const uint8_t dev = data->dev_id;
	const uint8_t port = data->port_id;
	size_t fwd = 0, received = 0, tx = 0;

	while (!fdata->done) {
		uint16_t i;
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BATCH_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}
		received += nb_rx;

		for (i = 0; i < nb_rx; i++) {
			rte_prefetch0(ev[i + 1].mbuf);
			if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
				worker_tx_pkt(dev, port, &ev[i]);
				ev[i].op = RTE_EVENT_OP_RELEASE;
				tx++;
			} else {
				ev[i].queue_id++;
				worker_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
			}
			work();
		}

		worker_event_enqueue_burst(dev, port, ev, nb_rx);
		fwd += nb_rx;
	}

	if (!cdata.quiet)
		printf("  worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
				rte_lcore_id(), received, fwd, tx);
	return 0;
}
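/* Burst, all-type-queue variant: as above, but queue_id is left unchanged. */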
static int
worker_do_tx_single_burst_atq(void *arg)
{
	struct rte_event ev[BATCH_SIZE + 1];

	struct worker_data *data = (struct worker_data *)arg;
	const uint8_t dev = data->dev_id;
	const uint8_t port = data->port_id;
	size_t fwd = 0, received = 0, tx = 0;

	while (!fdata->done) {
		uint16_t i;
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BATCH_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		received += nb_rx;

		for (i = 0; i < nb_rx; i++) {
			rte_prefetch0(ev[i + 1].mbuf);
			if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
				worker_tx_pkt(dev, port, &ev[i]);
				ev[i].op = RTE_EVENT_OP_RELEASE;
				tx++;
			} else
				worker_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
			work();
		}

		worker_event_enqueue_burst(dev, port, ev, nb_rx);
		fwd += nb_rx;
	}

	if (!cdata.quiet)
		printf("  worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
				rte_lcore_id(), received, fwd, tx);
	return 0;
}
/* Multi stage Pipeline Workers */
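/*
 * Multi stage workers derive the current stage from the event itself:
 * queue_id % cdata.num_stages for normal queues, or sub_event_type for
 * all-type queues. An event that reaches the last stage with atomic
 * scheduling is handed to the Tx adapter; all other events are forwarded,
 * either to the next stage or, after the last stage, to the atomic Tx slot.
 */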
static int
worker_do_tx(void *arg)
{
	struct rte_event ev;

	struct worker_data *data = (struct worker_data *)arg;
	const uint8_t dev = data->dev_id;
	const uint8_t port = data->port_id;
	const uint8_t lst_qid = cdata.num_stages - 1;
	size_t fwd = 0, received = 0, tx = 0;

	while (!fdata->done) {

		if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0)) {
			rte_pause();
			continue;
		}

		received++;
		const uint8_t cq_id = ev.queue_id % cdata.num_stages;

		if (cq_id >= lst_qid) {
			if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
				worker_tx_pkt(dev, port, &ev);
				tx++;
				continue;
			}

			worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
			ev.queue_id = (cq_id == lst_qid) ?
				cdata.next_qid[ev.queue_id] : ev.queue_id;
		} else {
			ev.queue_id = cdata.next_qid[ev.queue_id];
			worker_fwd_event(&ev, cdata.queue_type);
		}
		work();

		worker_event_enqueue(dev, port, &ev);
		fwd++;
	}

	if (!cdata.quiet)
		printf("  worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
				rte_lcore_id(), received, fwd, tx);
	return 0;
}
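/*
 * All-type-queue variant: the stage index travels in sub_event_type because
 * every stage of a pipeline shares a single event queue.
 */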
static int
worker_do_tx_atq(void *arg)
{
	struct rte_event ev;

	struct worker_data *data = (struct worker_data *)arg;
	const uint8_t dev = data->dev_id;
	const uint8_t port = data->port_id;
	const uint8_t lst_qid = cdata.num_stages - 1;
	size_t fwd = 0, received = 0, tx = 0;

	while (!fdata->done) {

		if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0)) {
			rte_pause();
			continue;
		}

		received++;
		const uint8_t cq_id = ev.sub_event_type % cdata.num_stages;

		if (cq_id == lst_qid) {
			if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
				worker_tx_pkt(dev, port, &ev);
				tx++;
				continue;
			}

			worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
		} else {
			ev.sub_event_type++;
			worker_fwd_event(&ev, cdata.queue_type);
		}
		work();

		worker_event_enqueue(dev, port, &ev);
		fwd++;
	}

	if (!cdata.quiet)
		printf("  worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
				rte_lcore_id(), received, fwd, tx);
	return 0;
}
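/* Burst variants of the two multi stage loops above. */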
static int
worker_do_tx_burst(void *arg)
{
	struct rte_event ev[BATCH_SIZE];

	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev = data->dev_id;
	uint8_t port = data->port_id;
	uint8_t lst_qid = cdata.num_stages - 1;
	size_t fwd = 0, received = 0, tx = 0;

	while (!fdata->done) {
		uint16_t i;
		const uint16_t nb_rx = rte_event_dequeue_burst(dev, port,
				ev, BATCH_SIZE, 0);

		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received += nb_rx;

		for (i = 0; i < nb_rx; i++) {
			const uint8_t cq_id = ev[i].queue_id % cdata.num_stages;

			if (cq_id >= lst_qid) {
				if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
					worker_tx_pkt(dev, port, &ev[i]);
					tx++;
					ev[i].op = RTE_EVENT_OP_RELEASE;
					continue;
				}
				ev[i].queue_id = (cq_id == lst_qid) ?
					cdata.next_qid[ev[i].queue_id] :
					ev[i].queue_id;

				worker_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
			} else {
				ev[i].queue_id = cdata.next_qid[ev[i].queue_id];
				worker_fwd_event(&ev[i], cdata.queue_type);
			}
			work();
		}

		worker_event_enqueue_burst(dev, port, ev, nb_rx);
		fwd += nb_rx;
	}

	if (!cdata.quiet)
		printf("  worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
				rte_lcore_id(), received, fwd, tx);
	return 0;
}
static int
worker_do_tx_burst_atq(void *arg)
{
	struct rte_event ev[BATCH_SIZE];

	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev = data->dev_id;
	uint8_t port = data->port_id;
	uint8_t lst_qid = cdata.num_stages - 1;
	size_t fwd = 0, received = 0, tx = 0;

	while (!fdata->done) {
		uint16_t i;

		const uint16_t nb_rx = rte_event_dequeue_burst(dev, port,
				ev, BATCH_SIZE, 0);

		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received += nb_rx;

		for (i = 0; i < nb_rx; i++) {
			const uint8_t cq_id = ev[i].sub_event_type %
				cdata.num_stages;

			if (cq_id == lst_qid) {
				if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
					worker_tx_pkt(dev, port, &ev[i]);
					tx++;
					ev[i].op = RTE_EVENT_OP_RELEASE;
					continue;
				}

				worker_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
			} else {
				ev[i].sub_event_type++;
				worker_fwd_event(&ev[i], cdata.queue_type);
			}
			work();
		}

		worker_event_enqueue_burst(dev, port, ev, nb_rx);
		fwd += nb_rx;
	}

	if (!cdata.quiet)
		printf("  worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
				rte_lcore_id(), received, fwd, tx);
	return 0;
}
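/*
 * Set up the event device for this mode: one event port per worker linked
 * to all queues, and, unless all-type queues are used, cdata.num_stages
 * queues plus one atomic Tx queue per ethdev pipeline.
 */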
static int
setup_eventdev_worker_tx_enq(struct worker_data *worker_data)
{
	uint8_t i;
	const uint8_t atq = cdata.all_type_queues ? 1 : 0;
	const uint8_t dev_id = 0;
	const uint8_t nb_ports = cdata.num_workers;
	uint8_t nb_slots = 0;
	uint8_t nb_queues = rte_eth_dev_count_avail();

	/*
	 * In case all type queues are not enabled, use queues equal to the
	 * number of stages * eth_dev_count and one extra queue per pipeline
	 * for Tx.
	 */
	if (!atq) {
		nb_queues *= cdata.num_stages;
		nb_queues += rte_eth_dev_count_avail();
	}
	struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_single_link_event_port_queues = 0,
			.nb_events_limit = 4096,
			.nb_event_queue_flows = 1024,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};
	struct rte_event_port_conf wkr_p_conf = {
			.dequeue_depth = cdata.worker_cq_depth,
			.enqueue_depth = 64,
			.new_event_threshold = 4096,
			.event_port_cfg = RTE_EVENT_PORT_CFG_HINT_WORKER,
	};
	struct rte_event_queue_conf wkr_q_conf = {
			.schedule_type = cdata.queue_type,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
	};
	int ret, ndev = rte_event_dev_count();

	if (ndev < 1) {
		printf("%d: No Eventdev Devices Found\n", __LINE__);
		return -1;
	}

	struct rte_event_dev_info dev_info;
	ret = rte_event_dev_info_get(dev_id, &dev_info);
	printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);

	if (dev_info.max_num_events < config.nb_events_limit)
		config.nb_events_limit = dev_info.max_num_events;
	if (dev_info.max_event_port_dequeue_depth <
			config.nb_event_port_dequeue_depth)
		config.nb_event_port_dequeue_depth =
			dev_info.max_event_port_dequeue_depth;
	if (dev_info.max_event_port_enqueue_depth <
			config.nb_event_port_enqueue_depth)
		config.nb_event_port_enqueue_depth =
			dev_info.max_event_port_enqueue_depth;

	ret = rte_event_dev_configure(dev_id, &config);
	if (ret < 0) {
		printf("%d: Error configuring device\n", __LINE__);
		return -1;
	}
	printf("  Stages:\n");
	for (i = 0; i < nb_queues; i++) {

		if (atq) {
			nb_slots = cdata.num_stages;
			wkr_q_conf.event_queue_cfg =
				RTE_EVENT_QUEUE_CFG_ALL_TYPES;
		} else {
			uint8_t slot;

			nb_slots = cdata.num_stages + 1;
			slot = i % nb_slots;
			wkr_q_conf.schedule_type = slot == cdata.num_stages ?
				RTE_SCHED_TYPE_ATOMIC : cdata.queue_type;
		}

		if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
			printf("%d: error creating qid %d\n", __LINE__, i);
			return -1;
		}
		cdata.qid[i] = i;
		cdata.next_qid[i] = i+1;
		if (cdata.enable_queue_priorities) {
			const uint32_t prio_delta =
				(RTE_EVENT_DEV_PRIORITY_LOWEST) /
				nb_slots;

			/* higher priority for queues closer to tx */
			wkr_q_conf.priority =
				RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta *
				(i % nb_slots);
		}

		const char *type_str = "Atomic";
		switch (wkr_q_conf.schedule_type) {
		case RTE_SCHED_TYPE_ORDERED:
			type_str = "Ordered";
			break;
		case RTE_SCHED_TYPE_PARALLEL:
			type_str = "Parallel";
			break;
		}
		printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
				wkr_q_conf.priority);
	}
	if (wkr_p_conf.new_event_threshold > config.nb_events_limit)
		wkr_p_conf.new_event_threshold = config.nb_events_limit;
	if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
		wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
	if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
		wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
	/* set up one port per worker, linking to all stage queues */
	for (i = 0; i < cdata.num_workers; i++) {
		struct worker_data *w = &worker_data[i];
		w->dev_id = dev_id;
		if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
			printf("Error setting up port %d\n", i);
			return -1;
		}

		if (rte_event_port_link(dev_id, i, NULL, NULL, 0)
				!= nb_queues) {
			printf("%d: error creating link for port %d\n",
					__LINE__, i);
			return -1;
		}
		w->port_id = i;
	}
	/*
	 * Reduce the load on the ingress event queue by splitting the
	 * traffic across multiple event queues.
	 * For example, with nb_stages = 2 and nb_ethdev = 2:
	 *
	 *	nb_queues = (2 * 2) + 2 = 6 (non atq)
	 *	rx_stride = 3
	 *
	 * Traffic is then split across queue 0 and queue 3, since the Rx
	 * adapter queue id is chosen as <ethport_id> * <rx_stride>, i.e. in
	 * the above case eth ports 0 and 1 inject packets into event
	 * queues 0 and 3.
	 *
	 * This forms two sets of queue pipelines: 0->1->2->tx and 3->4->5->tx.
	 */
	cdata.rx_stride = atq ? 1 : nb_slots;
	ret = rte_event_dev_service_id_get(dev_id,
				&fdata->evdev_service_id);
	if (ret != -ESRCH && ret != 0) {
		printf("Error getting the service ID\n");
		return -1;
	}
	rte_service_runstate_set(fdata->evdev_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);

	if (rte_event_dev_start(dev_id) < 0)
		rte_exit(EXIT_FAILURE, "Error starting eventdev");

	return dev_id;
}
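/*
 * Rx adapters without the internal-port capability are backed by a software
 * service. Their service IDs are collected here so that a single registered
 * service (service_rx_adapter below) can drive them all.
 */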
struct rx_adptr_services {
	uint16_t nb_rx_adptrs;
	uint32_t *rx_adpt_arr;
};

static int32_t
service_rx_adapter(void *arg)
{
	int i;
	struct rx_adptr_services *adptr_services = arg;

	for (i = 0; i < adptr_services->nb_rx_adptrs; i++)
		rte_service_run_iter_on_app_lcore(
				adptr_services->rx_adpt_arr[i], 1);
	return 0;
}
/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_rxconf rx_conf;
	static const struct rte_eth_conf port_conf_default = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_hf = RTE_ETH_RSS_IP |
					  RTE_ETH_RSS_TCP |
					  RTE_ETH_RSS_UDP,
			}
		}
	};
	const uint16_t rx_rings = 1, tx_rings = 1;
	const uint16_t rx_ring_size = 512, tx_ring_size = 512;
	struct rte_eth_conf port_conf = port_conf_default;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error during getting device (port %u) info: %s\n",
				port, strerror(-retval));
		return retval;
	}

	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	rx_conf = dev_info.default_rxconf;
	rx_conf.offloads = port_conf.rxmode.offloads;

	port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (port_conf.rx_adv_conf.rss_conf.rss_hf !=
			port_conf_default.rx_adv_conf.rss_conf.rss_hf) {
		printf("Port %u modified RSS hash function based on hardware support, "
			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
			port,
			port_conf_default.rx_adv_conf.rss_conf.rss_hf,
			port_conf.rx_adv_conf.rss_conf.rss_hf);
	}
	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Allocate and set up 1 RX queue per Ethernet port. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
				rte_eth_dev_socket_id(port), &rx_conf,
				mbuf_pool);
		if (retval < 0)
			return retval;
	}

	txconf = dev_info.default_txconf;
	txconf.offloads = port_conf_default.txmode.offloads;
	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
				rte_eth_dev_socket_id(port), &txconf);
		if (retval < 0)
			return retval;
	}
	/* Display the port MAC address. */
	struct rte_ether_addr addr;
	retval = rte_eth_macaddr_get(port, &addr);
	if (retval != 0) {
		printf("Failed to get MAC address (port %u): %s\n",
				port, rte_strerror(-retval));
		return retval;
	}

	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			(unsigned int)port, RTE_ETHER_ADDR_BYTES(&addr));

	/* Enable RX in promiscuous mode for the Ethernet device. */
	retval = rte_eth_promiscuous_enable(port);
	if (retval != 0)
		return retval;

	return 0;
}
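/* Create the mbuf pool and initialize every available ethdev port. */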
static int
init_ports(uint16_t num_ports)
{
	uint16_t portid;

	if (!cdata.num_mbuf)
		cdata.num_mbuf = 16384 * num_ports;

	struct rte_mempool *mp = rte_pktmbuf_pool_create("packet_pool",
			/* mbufs */ cdata.num_mbuf,
			/* cache_size */ 512,
			/* priv_size */ 0,
			/* data_room_size */ RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());

	RTE_ETH_FOREACH_DEV(portid)
		if (port_init(portid, mp) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
					portid);

	return 0;
}
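/*
 * Create one Rx adapter per ethdev, injecting into event queue
 * <port_id> * rx_stride, and a single Tx adapter that serves all ports.
 */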
static void
init_adapters(uint16_t nb_ports)
{
	int i;
	int ret;
	uint8_t evdev_id = 0;
	struct rx_adptr_services *adptr_services = NULL;
	struct rte_event_dev_info dev_info;

	ret = rte_event_dev_info_get(evdev_id, &dev_info);
	adptr_services = rte_zmalloc(NULL, sizeof(struct rx_adptr_services), 0);

	struct rte_event_port_conf adptr_p_conf = {
		.dequeue_depth = cdata.worker_cq_depth,
		.enqueue_depth = 64,
		.new_event_threshold = 4096,
		.event_port_cfg = RTE_EVENT_PORT_CFG_HINT_PRODUCER,
	};

	init_ports(nb_ports);
	if (adptr_p_conf.new_event_threshold > dev_info.max_num_events)
		adptr_p_conf.new_event_threshold = dev_info.max_num_events;
	if (adptr_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
		adptr_p_conf.dequeue_depth =
			dev_info.max_event_port_dequeue_depth;
	if (adptr_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
		adptr_p_conf.enqueue_depth =
			dev_info.max_event_port_enqueue_depth;

	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
	memset(&queue_conf, 0, sizeof(queue_conf));
	queue_conf.ev.sched_type = cdata.queue_type;
	for (i = 0; i < nb_ports; i++) {
		uint32_t cap;
		uint32_t service_id;

		ret = rte_event_eth_rx_adapter_create(i, evdev_id,
				&adptr_p_conf);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"failed to create rx adapter[%d]", i);

		ret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"failed to get event rx adapter "
					"capabilities");

		queue_conf.ev.queue_id = cdata.rx_stride ?
			(i * cdata.rx_stride)
			: (uint8_t)cdata.qid[0];

		ret = rte_event_eth_rx_adapter_queue_add(i, i, -1, &queue_conf);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"Failed to add queues to Rx adapter");

		/* Producer needs to be scheduled. */
		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			ret = rte_event_eth_rx_adapter_service_id_get(i,
					&service_id);
			if (ret != -ESRCH && ret != 0) {
				rte_exit(EXIT_FAILURE,
				"Error getting the service ID for rx adptr\n");
			}

			rte_service_runstate_set(service_id, 1);
			rte_service_set_runstate_mapped_check(service_id, 0);

			adptr_services->nb_rx_adptrs++;
			adptr_services->rx_adpt_arr = rte_realloc(
					adptr_services->rx_adpt_arr,
					adptr_services->nb_rx_adptrs *
					sizeof(uint32_t), 0);
			adptr_services->rx_adpt_arr[
				adptr_services->nb_rx_adptrs - 1] =
					service_id;
		}

		ret = rte_event_eth_rx_adapter_start(i);
		if (ret)
			rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
					i);
	}
	/* We already know that the Tx adapter has INTERNAL port cap. */
	ret = rte_event_eth_tx_adapter_create(cdata.tx_adapter_id, evdev_id,
			&adptr_p_conf);
	if (ret)
		rte_exit(EXIT_FAILURE, "failed to create tx adapter[%d]",
				cdata.tx_adapter_id);

	for (i = 0; i < nb_ports; i++) {
		ret = rte_event_eth_tx_adapter_queue_add(cdata.tx_adapter_id, i,
				-1);
		if (ret)
			rte_exit(EXIT_FAILURE,
					"Failed to add queues to Tx adapter");
	}

	ret = rte_event_eth_tx_adapter_start(cdata.tx_adapter_id);
	if (ret)
		rte_exit(EXIT_FAILURE, "Tx adapter[%d] start failed",
				cdata.tx_adapter_id);
	if (adptr_services->nb_rx_adptrs) {
		struct rte_service_spec service;

		memset(&service, 0, sizeof(struct rte_service_spec));
		snprintf(service.name, sizeof(service.name), "rx_service");
		service.callback = service_rx_adapter;
		service.callback_userdata = (void *)adptr_services;

		int32_t ret = rte_service_component_register(&service,
				&fdata->rxadptr_service_id);
		if (ret)
			rte_exit(EXIT_FAILURE,
				"Rx adapter service register failed");

		rte_service_runstate_set(fdata->rxadptr_service_id, 1);
		rte_service_component_runstate_set(fdata->rxadptr_service_id,
				1);
		rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id,
				0);
	} else {
		memset(fdata->rx_core, 0, sizeof(unsigned int) * MAX_NUM_CORE);
	}

	if (!adptr_services->nb_rx_adptrs && (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))
		fdata->cap.scheduler = NULL;

	/* Only kept as service callback userdata when Rx adapters exist. */
	if (!adptr_services->nb_rx_adptrs)
		rte_free(adptr_services);
}
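/*
 * Validate the configuration against device capabilities: all-type queues
 * require RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES, and Rx/scheduler service cores
 * are only needed when the Rx adapter or the scheduler is not handled in
 * hardware. Core masks for services that are not needed are cleared.
 */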
static void
worker_tx_enq_opt_check(void)
{
	int i;
	int ret;
	uint32_t cap = 0;
	uint8_t rx_needed = 0;
	uint8_t sched_needed = 0;
	struct rte_event_dev_info eventdev_info;

	memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
	rte_event_dev_info_get(0, &eventdev_info);

	if (cdata.all_type_queues && !(eventdev_info.event_dev_cap &
				RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES))
		rte_exit(EXIT_FAILURE,
				"Event dev doesn't support all type queues\n");
	sched_needed = !(eventdev_info.event_dev_cap &
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED);

	RTE_ETH_FOREACH_DEV(i) {
		ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
		if (ret)
			rte_exit(EXIT_FAILURE,
				"failed to get event rx adapter capabilities");
		rx_needed |=
			!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
	}

	if (cdata.worker_lcore_mask == 0 ||
			(rx_needed && cdata.rx_lcore_mask == 0) ||
			(sched_needed && cdata.sched_lcore_mask == 0)) {
		printf("Core part of pipeline was not assigned any cores. "
			"This will stall the pipeline, please check core masks "
			"(use -h for details on setting core masks):\n"
			"\trx: %"PRIu64"\n\tsched: %"PRIu64
			"\n\tworkers: %"PRIu64"\n", cdata.rx_lcore_mask,
			cdata.sched_lcore_mask, cdata.worker_lcore_mask);
		rte_exit(-1, "Fix core masks\n");
	}

	if (!sched_needed)
		memset(fdata->sched_core, 0,
				sizeof(unsigned int) * MAX_NUM_CORE);
	if (!rx_needed)
		memset(fdata->rx_core, 0,
				sizeof(unsigned int) * MAX_NUM_CORE);

	memset(fdata->tx_core, 0, sizeof(unsigned int) * MAX_NUM_CORE);
}
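/*
 * Pick the worker loop matching the configuration: single or multi stage,
 * burst or single-event dequeue, normal or all-type queues.
 */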
static worker_loop
get_worker_loop_single_burst(uint8_t atq)
{
	if (atq)
		return worker_do_tx_single_burst_atq;

	return worker_do_tx_single_burst;
}

static worker_loop
get_worker_loop_single_non_burst(uint8_t atq)
{
	if (atq)
		return worker_do_tx_single_atq;

	return worker_do_tx_single;
}

static worker_loop
get_worker_loop_burst(uint8_t atq)
{
	if (atq)
		return worker_do_tx_burst_atq;

	return worker_do_tx_burst;
}

static worker_loop
get_worker_loop_non_burst(uint8_t atq)
{
	if (atq)
		return worker_do_tx_atq;

	return worker_do_tx;
}

static worker_loop
get_worker_single_stage(bool burst)
{
	uint8_t atq = cdata.all_type_queues ? 1 : 0;

	if (burst)
		return get_worker_loop_single_burst(atq);

	return get_worker_loop_single_non_burst(atq);
}

static worker_loop
get_worker_multi_stage(bool burst)
{
	uint8_t atq = cdata.all_type_queues ? 1 : 0;

	if (burst)
		return get_worker_loop_burst(atq);

	return get_worker_loop_non_burst(atq);
}

void
set_worker_tx_enq_setup_data(struct setup_data *caps, bool burst)
{
	if (cdata.num_stages == 1)
		caps->worker = get_worker_single_stage(burst);
	else
		caps->worker = get_worker_multi_stage(burst);

	caps->check_opt = worker_tx_enq_opt_check;
	caps->scheduler = schedule_devices;
	caps->evdev_setup = setup_eventdev_worker_tx_enq;
	caps->adptr_setup = init_adapters;
}