/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <getopt.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <signal.h>
#include <ctype.h>
#include <string.h>
#include <errno.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_mempool.h>
#include <rte_launch.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_service.h>

#define MAX_NUM_STAGES 8
#define BATCH_SIZE 16
#define MAX_NUM_CORE 64
/* Per-role state; every field below is referenced by the fast path. */
struct prod_data {
	uint8_t dev_id;
	uint8_t port_id;
	int32_t qid;
	unsigned int num_nic_ports;
} __rte_cache_aligned;

struct cons_data {
	uint8_t dev_id;
	uint8_t port_id;
	uint8_t release;
} __rte_cache_aligned;

static struct prod_data prod_data;
static struct cons_data cons_data;

struct worker_data {
	uint8_t dev_id;
	uint8_t port_id;
} __rte_cache_aligned;
struct fastpath_data {
	volatile int done;
	uint32_t rx_lock;
	uint32_t tx_lock;
	uint32_t sched_lock;
	uint32_t evdev_service_id;
	bool rx_single;
	bool tx_single;
	bool sched_single;
	unsigned int rx_core[MAX_NUM_CORE];
	unsigned int tx_core[MAX_NUM_CORE];
	unsigned int sched_core[MAX_NUM_CORE];
	unsigned int worker_core[MAX_NUM_CORE];
	struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;

static struct fastpath_data *fdata;
struct config_data {
	unsigned int active_cores;
	unsigned int num_workers;
	int64_t num_packets;
	unsigned int num_fids;
	int queue_type;
	int worker_cycles;
	int enable_queue_priorities;
	int quiet;
	int dump_dev;
	int dump_dev_signal;
	unsigned int num_stages;
	unsigned int worker_cq_depth;
	int16_t next_qid[MAX_NUM_STAGES+2];
	int16_t qid[MAX_NUM_STAGES];
};

static struct config_data cdata = {
	.num_packets = (1L << 25), /* do ~32M packets */
	.num_fids = 512,
	.queue_type = RTE_SCHED_TYPE_ATOMIC,
	.num_stages = 1,
	.worker_cq_depth = 16,
};
static int
core_in_use(unsigned int lcore_id) {
	return (fdata->rx_core[lcore_id] || fdata->sched_core[lcore_id] ||
		fdata->tx_core[lcore_id] || fdata->worker_core[lcore_id]);
}
static void
eth_tx_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
			void *userdata)
{
	int port_id = (uintptr_t) userdata;
	unsigned int _sent = 0;

	do {
		/* Note: hard-coded TX queue */
		_sent += rte_eth_tx_burst(port_id, 0, &pkts[_sent],
					  unsent - _sent);
	} while (_sent != unsent);
}
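/*
 * This callback is registered per port in init_ports() below, e.g.:
 *
 *	rte_eth_tx_buffer_set_err_callback(fdata->tx_buf[i],
 *			eth_tx_buffer_retry, (void *)(uintptr_t)i);
 *
 * so packets that do not fit in a TX burst are retried instead of being
 * dropped (the default error callback frees them).
 */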
static int
consumer(void)
{
	const uint64_t freq_khz = rte_get_timer_hz() / 1000;
	struct rte_event packets[BATCH_SIZE];

	static uint64_t received;
	static uint64_t last_pkts;
	static uint64_t last_time;
	static uint64_t start_time;
	unsigned int i, j;
	uint8_t dev_id = cons_data.dev_id;
	uint8_t port_id = cons_data.port_id;

	uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
			packets, RTE_DIM(packets), 0);
	if (n == 0) {
		for (j = 0; j < rte_eth_dev_count(); j++)
			rte_eth_tx_buffer_flush(j, 0, fdata->tx_buf[j]);
		return 0;
	}
	if (start_time == 0)
		last_time = start_time = rte_get_timer_cycles();
	received += n;
	for (i = 0; i < n; i++) {
		uint8_t outport = packets[i].mbuf->port;
		rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
				packets[i].mbuf);

		packets[i].op = RTE_EVENT_OP_RELEASE;
	}

	if (cons_data.release) {
		uint16_t nb_tx;

		nb_tx = rte_event_enqueue_burst(dev_id, port_id, packets, n);
		while (nb_tx < n)
			nb_tx += rte_event_enqueue_burst(dev_id, port_id,
							 packets + nb_tx,
							 n - nb_tx);
	}
	/* Print out mpps every 1<<22 packets */
	if (!cdata.quiet && received >= last_pkts + (1<<22)) {
		const uint64_t now = rte_get_timer_cycles();
		const uint64_t total_ms = (now - start_time) / freq_khz;
		const uint64_t delta_ms = (now - last_time) / freq_khz;
		uint64_t delta_pkts = received - last_pkts;

		printf("# consumer RX=%"PRIu64", time %"PRIu64 "ms, "
				"avg %.3f mpps [current %.3f mpps]\n",
				received, total_ms,
				received / (total_ms * 1000.0),
				delta_pkts / (delta_ms * 1000.0));
		last_pkts = received;
		last_time = now;
	}
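	/*
	 * Worked example: pkts / (ms * 1000) is Mpps. Each print covers
	 * 1<<22 ~= 4.19M packets; if those arrived over 1000 ms, the
	 * current rate reports as 4194304 / (1000 * 1000) ~= 4.194 mpps.
	 */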
	cdata.num_packets -= n;
	if (cdata.num_packets <= 0)
		fdata->done = 1;

	return 0;
}
static int
producer(void)
{
	static uint8_t eth_port;
	struct rte_mbuf *mbufs[BATCH_SIZE+2];
	struct rte_event ev[BATCH_SIZE+2];
	uint32_t i, num_ports = prod_data.num_nic_ports;
	int32_t qid = prod_data.qid;
	uint8_t dev_id = prod_data.dev_id;
	uint8_t port_id = prod_data.port_id;
	uint32_t prio_idx = 0;

	const uint16_t nb_rx = rte_eth_rx_burst(eth_port, 0, mbufs, BATCH_SIZE);
	if (++eth_port == num_ports)
		eth_port = 0;
	if (nb_rx == 0) {
		rte_pause();
		return 0;
	}
	for (i = 0; i < nb_rx; i++) {
		ev[i].flow_id = mbufs[i]->hash.rss;
		ev[i].op = RTE_EVENT_OP_NEW;
		ev[i].sched_type = cdata.queue_type;
		ev[i].queue_id = qid;
		ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
		ev[i].sub_event_type = 0;
		ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
		ev[i].mbuf = mbufs[i];
		RTE_SET_USED(prio_idx);
	}

	const int nb_tx = rte_event_enqueue_burst(dev_id, port_id, ev, nb_rx);
	if (nb_tx != nb_rx) {
		for (i = nb_tx; i < nb_rx; i++)
			rte_pktmbuf_free(mbufs[i]);
	}

	return 0;
}
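/*
 * Note on the drop above: a shortfall from an OP_NEW enqueue usually
 * means the eventdev is applying back-pressure (its new_event_threshold
 * has been reached), so the producer frees the surplus mbufs rather
 * than stalling the pipeline by retrying.
 */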
static inline void
schedule_devices(unsigned int lcore_id)
{
	if (fdata->rx_core[lcore_id] && (fdata->rx_single ||
	    rte_atomic32_cmpset(&(fdata->rx_lock), 0, 1))) {
		producer();
		rte_atomic32_clear((rte_atomic32_t *)&(fdata->rx_lock));
	}

	if (fdata->sched_core[lcore_id] && (fdata->sched_single ||
	    rte_atomic32_cmpset(&(fdata->sched_lock), 0, 1))) {
		rte_service_run_iter_on_app_lcore(fdata->evdev_service_id, 1);
		if (cdata.dump_dev_signal) {
			rte_event_dev_dump(0, stdout);
			cdata.dump_dev_signal = 0;
		}
		rte_atomic32_clear((rte_atomic32_t *)&(fdata->sched_lock));
	}

	if (fdata->tx_core[lcore_id] && (fdata->tx_single ||
	    rte_atomic32_cmpset(&(fdata->tx_lock), 0, 1))) {
		consumer();
		rte_atomic32_clear((rte_atomic32_t *)&(fdata->tx_lock));
	}
}
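/*
 * Locking model: any number of cores may carry the rx/sched/tx role
 * bits, but the rte_atomic32_cmpset() try-locks above ensure only one
 * core runs a given role at a time; the *_single flags elide the atomic
 * entirely when exactly one core owns a role.
 */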
static inline void
work(struct rte_mbuf *m)
{
	struct ether_hdr *eth;
	struct ether_addr addr;

	/* change mac addresses on packet (to use mbuf data) */
	/*
	 * FIXME Swap mac address properly and also handle the
	 * case for both odd and even number of stages that the
	 * addresses end up the same at the end of the pipeline
	 */
	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
	ether_addr_copy(&eth->d_addr, &addr);
	ether_addr_copy(&addr, &eth->d_addr);
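	/*
	 * A full swap (a sketch for the FIXME above) would exchange the
	 * source and destination addresses rather than copying d_addr
	 * back onto itself:
	 *
	 *	struct ether_addr tmp;
	 *	ether_addr_copy(&eth->d_addr, &tmp);
	 *	ether_addr_copy(&eth->s_addr, &eth->d_addr);
	 *	ether_addr_copy(&tmp, &eth->s_addr);
	 */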
	/* do a number of cycles of work per packet */
	volatile uint64_t start_tsc = rte_rdtsc();
	while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
		rte_pause();
}
static int
worker(void *arg)
{
	struct rte_event events[BATCH_SIZE];

	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev_id = data->dev_id;
	uint8_t port_id = data->port_id;
	size_t sent = 0, received = 0;
	unsigned int lcore_id = rte_lcore_id();

	while (!fdata->done) {
		uint16_t i;

		schedule_devices(lcore_id);

		if (!fdata->worker_core[lcore_id]) {
			rte_pause();
			continue;
		}

		const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
				events, RTE_DIM(events), 0);

		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received += nb_rx;
		for (i = 0; i < nb_rx; i++) {

			/* The first worker stage does classification */
			if (events[i].queue_id == cdata.qid[0])
				events[i].flow_id = events[i].mbuf->hash.rss
						% cdata.num_fids;

			events[i].queue_id = cdata.next_qid[events[i].queue_id];
			events[i].op = RTE_EVENT_OP_FORWARD;
			events[i].sched_type = cdata.queue_type;

			work(events[i].mbuf);
		}
		uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
				events, nb_rx);
		while (nb_tx < nb_rx && !fdata->done)
			nb_tx += rte_event_enqueue_burst(dev_id, port_id,
					events + nb_tx,
					nb_rx - nb_tx);
		sent += nb_tx;
	}

	if (!cdata.quiet)
		printf("  worker %u thread done. RX=%zu TX=%zu\n",
				rte_lcore_id(), received, sent);

	return 0;
}
/*
 * Parse the coremask given as argument (hexadecimal string) and fill
 * the global configuration (core role and core count) with the parsed
 * value.
 */
static int xdigit2val(unsigned char c)
{
	if (isdigit(c))
		return c - '0';
	if (isupper(c))
		return c - 'A' + 10;
	return c - 'a' + 10;
}
static uint64_t
parse_coremask(const char *coremask)
{
	int i, j, idx = 0, val;
	unsigned int count = 0;
	char c;
	uint64_t mask = 0;
	const int32_t BITS_HEX = 4;

	if (coremask == NULL)
		return -1;
	/* Remove all blank characters ahead and after.
	 * Remove 0x/0X if exists.
	 */
	while (isblank(*coremask))
		coremask++;
	if (coremask[0] == '0' && ((coremask[1] == 'x')
		|| (coremask[1] == 'X')))
		coremask += 2;
	i = strlen(coremask);
	while ((i > 0) && isblank(coremask[i - 1]))
		i--;

	/* walk the string from the least significant hex digit */
	for (i = i - 1; i >= 0 && idx < MAX_NUM_CORE; i--) {
		c = coremask[i];
		if (isxdigit(c) == 0) {
			/* invalid characters */
			return -1;
		}
		val = xdigit2val(c);
		for (j = 0; j < BITS_HEX && idx < MAX_NUM_CORE; j++, idx++) {
			if ((1 << j) & val) {
				mask |= (1UL << idx);
				count++;
			}
		}
	}
	/* anything left over must be zero padding */
	for (; i >= 0; i--)
		if (coremask[i] != '0')
			return -1;
	if (count == 0)
		return -1;
	return mask;
}
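/*
 * Example: parse_coremask("0xf0") yields a mask with bits 4-7 set, so
 * lcores 4, 5, 6 and 7 take on the corresponding pipeline role.
 */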
static struct option long_options[] = {
	{"workers", required_argument, 0, 'w'},
	{"packets", required_argument, 0, 'n'},
	{"atomic-flows", required_argument, 0, 'f'},
	{"num_stages", required_argument, 0, 's'},
	{"rx-mask", required_argument, 0, 'r'},
	{"tx-mask", required_argument, 0, 't'},
	{"sched-mask", required_argument, 0, 'e'},
	{"cq-depth", required_argument, 0, 'c'},
	{"work-cycles", required_argument, 0, 'W'},
	{"queue-priority", no_argument, 0, 'P'},
	{"parallel", no_argument, 0, 'p'},
	{"ordered", no_argument, 0, 'o'},
	{"quiet", no_argument, 0, 'q'},
	{"dump", no_argument, 0, 'D'},
	{0, 0, 0, 0}
};
static void
usage(void)
{
	const char *usage_str =
		"  Usage: eventdev_demo [options]\n"
		"  Options:\n"
		"  -n, --packets=N              Send N packets (default ~32M), 0 implies no limit\n"
		"  -f, --atomic-flows=N         Use N random flows from 1 to N (default 16)\n"
		"  -s, --num_stages=N           Use N atomic stages (default 1)\n"
		"  -r, --rx-mask=core mask      Run NIC rx on CPUs in core mask\n"
		"  -w, --worker-mask=core mask  Run worker on CPUs in core mask\n"
		"  -t, --tx-mask=core mask      Run NIC tx on CPUs in core mask\n"
		"  -e  --sched-mask=core mask   Run scheduler on CPUs in core mask\n"
		"  -c  --cq-depth=N             Worker CQ depth (default 16)\n"
		"  -W  --work-cycles=N          Worker cycles (default 0)\n"
		"  -P  --queue-priority         Enable scheduler queue prioritization\n"
		"  -o, --ordered                Use ordered scheduling\n"
		"  -p, --parallel               Use parallel scheduling\n"
		"  -q, --quiet                  Minimize printed output\n"
		"  -D, --dump                   Print detailed statistics before exit"
		"\n";
	fprintf(stderr, "%s", usage_str);
	exit(1);
}
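/*
 * Example invocation (illustrative only; the vdev name and core masks
 * depend on the system and the eventdev PMD in use):
 *
 *	./eventdev_pipeline_sw_pmd --vdev event_sw0 -- \
 *		-r 0x1 -t 0x2 -e 0x4 -w 0xf0 -s 3 -n 0
 */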
static void
parse_app_args(int argc, char **argv)
{
	/* Parse CLI options */
	int option_index;
	int c;
	opterr = 0;
	uint64_t rx_lcore_mask = 0;
	uint64_t tx_lcore_mask = 0;
	uint64_t sched_lcore_mask = 0;
	uint64_t worker_lcore_mask = 0;
	int i;

	for (;;) {
		c = getopt_long(argc, argv, "r:t:e:c:w:n:f:s:poPqDW:",
				long_options, &option_index);
		if (c == -1)
			break;

		int popcnt = 0;
		switch (c) {
		case 'n':
			cdata.num_packets = (int64_t)atol(optarg);
			if (cdata.num_packets == 0)
				cdata.num_packets = INT64_MAX;
			break;
		case 'f':
			cdata.num_fids = (unsigned int)atoi(optarg);
			break;
		case 's':
			cdata.num_stages = (unsigned int)atoi(optarg);
			break;
		case 'c':
			cdata.worker_cq_depth = (unsigned int)atoi(optarg);
			break;
		case 'W':
			cdata.worker_cycles = (unsigned int)atoi(optarg);
			break;
		case 'P':
			cdata.enable_queue_priorities = 1;
			break;
		case 'o':
			cdata.queue_type = RTE_SCHED_TYPE_ORDERED;
			break;
		case 'p':
			cdata.queue_type = RTE_SCHED_TYPE_PARALLEL;
			break;
		case 'q':
			cdata.quiet = 1;
			break;
		case 'D':
			cdata.dump_dev = 1;
			break;
		case 'w':
			worker_lcore_mask = parse_coremask(optarg);
			break;
		case 'r':
			rx_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(rx_lcore_mask);
			fdata->rx_single = (popcnt == 1);
			break;
		case 't':
			tx_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(tx_lcore_mask);
			fdata->tx_single = (popcnt == 1);
			break;
		case 'e':
			sched_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(sched_lcore_mask);
			fdata->sched_single = (popcnt == 1);
			break;
		default:
			usage();
		}
	}
	if (worker_lcore_mask == 0 || rx_lcore_mask == 0 ||
	    sched_lcore_mask == 0 || tx_lcore_mask == 0) {
		printf("Core part of pipeline was not assigned any cores. "
			"This will stall the pipeline, please check core masks "
			"(use -h for details on setting core masks):\n"
			"\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
			"\n\tworkers: %"PRIu64"\n",
			rx_lcore_mask, tx_lcore_mask, sched_lcore_mask,
			worker_lcore_mask);
		rte_exit(-1, "Fix core masks\n");
	}
	if (cdata.num_stages == 0 || cdata.num_stages > MAX_NUM_STAGES)
		usage();
	for (i = 0; i < MAX_NUM_CORE; i++) {
		fdata->rx_core[i] = !!(rx_lcore_mask & (1UL << i));
		fdata->tx_core[i] = !!(tx_lcore_mask & (1UL << i));
		fdata->sched_core[i] = !!(sched_lcore_mask & (1UL << i));
		fdata->worker_core[i] = !!(worker_lcore_mask & (1UL << i));

		if (fdata->worker_core[i])
			cdata.num_workers++;
		if (core_in_use(i))
			cdata.active_cores++;
	}
}
/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	static const struct rte_eth_conf port_conf_default = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
			.max_rx_pkt_len = ETHER_MAX_LEN,
			.ignore_offload_bitfield = 1,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_hf = ETH_RSS_IP |
					  ETH_RSS_TCP |
					  ETH_RSS_UDP,
			}
		}
	};
	const uint16_t rx_rings = 1, tx_rings = 1;
	const uint16_t rx_ring_size = 512, tx_ring_size = 512;
	struct rte_eth_conf port_conf = port_conf_default;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	if (port >= rte_eth_dev_count())
		return -1;

	rte_eth_dev_info_get(port, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Allocate and set up 1 RX queue per Ethernet port. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
				rte_eth_dev_socket_id(port), NULL, mbuf_pool);
		if (retval < 0)
			return retval;
	}

	txconf = dev_info.default_txconf;
	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	txconf.offloads = port_conf_default.txmode.offloads;
	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
				rte_eth_dev_socket_id(port), &txconf);
		if (retval < 0)
			return retval;
	}

	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;
	/* Display the port MAC address. */
	struct ether_addr addr;
	rte_eth_macaddr_get(port, &addr);
	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			(unsigned int)port,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	/* Enable RX in promiscuous mode for the Ethernet device. */
	rte_eth_promiscuous_enable(port);

	return 0;
}
static int
init_ports(unsigned int num_ports)
{
	uint8_t portid;
	unsigned int i;

	struct rte_mempool *mp = rte_pktmbuf_pool_create("packet_pool",
			/* mbufs */ 16384 * num_ports,
			/* cache_size */ 512,
			/* priv_size */ 0,
			/* data_room_size */ RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());

	for (portid = 0; portid < num_ports; portid++)
		if (port_init(portid, mp) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
					portid);

	for (i = 0; i < num_ports; i++) {
		void *userdata = (void *)(uintptr_t) i;
		fdata->tx_buf[i] =
			rte_malloc(NULL, RTE_ETH_TX_BUFFER_SIZE(32), 0);
		if (fdata->tx_buf[i] == NULL)
			rte_panic("Out of memory\n");
		rte_eth_tx_buffer_init(fdata->tx_buf[i], 32);
		rte_eth_tx_buffer_set_err_callback(fdata->tx_buf[i],
						   eth_tx_buffer_retry,
						   userdata);
	}

	return 0;
}
struct port_link {
	uint8_t queue_id;
	uint8_t priority;
};

static int
setup_eventdev(struct prod_data *prod_data,
		struct cons_data *cons_data,
		struct worker_data *worker_data)
{
	const uint8_t dev_id = 0;
	/* +1 stages is for a SINGLE_LINK TX stage */
	const uint8_t nb_queues = cdata.num_stages + 1;
	/* + 2 is one port for producer and one for consumer */
	const uint8_t nb_ports = cdata.num_workers + 2;
	struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_events_limit = 4096,
			.nb_event_queue_flows = 1024,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};
	struct rte_event_port_conf wkr_p_conf = {
			.dequeue_depth = cdata.worker_cq_depth,
			.enqueue_depth = 64,
			.new_event_threshold = 4096,
	};
	struct rte_event_queue_conf wkr_q_conf = {
			.schedule_type = cdata.queue_type,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
	};
	struct rte_event_port_conf tx_p_conf = {
			.dequeue_depth = 128,
			.enqueue_depth = 128,
			.new_event_threshold = 4096,
	};
	const struct rte_event_queue_conf tx_q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
	};

	struct port_link worker_queues[MAX_NUM_STAGES];
	uint8_t disable_implicit_release;
	struct port_link tx_queue;
	unsigned int i;
	int ret, ndev = rte_event_dev_count();
	if (ndev < 1) {
		printf("%d: No Eventdev Devices Found\n", __LINE__);
		return -1;
	}
	struct rte_event_dev_info dev_info;
	ret = rte_event_dev_info_get(dev_id, &dev_info);
	printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);

	disable_implicit_release = (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);

	wkr_p_conf.disable_implicit_release = disable_implicit_release;
	tx_p_conf.disable_implicit_release = disable_implicit_release;
	if (dev_info.max_event_port_dequeue_depth <
			config.nb_event_port_dequeue_depth)
		config.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;
	if (dev_info.max_event_port_enqueue_depth <
			config.nb_event_port_enqueue_depth)
		config.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;

	ret = rte_event_dev_configure(dev_id, &config);
	if (ret < 0) {
		printf("%d: Error configuring device\n", __LINE__);
		return -1;
	}
	/* Q creation - one load balanced per pipeline stage */
	printf("  Stages:\n");
	for (i = 0; i < cdata.num_stages; i++) {
		if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
			printf("%d: error creating qid %d\n", __LINE__, i);
			return -1;
		}
		cdata.qid[i] = i;
		cdata.next_qid[i] = i+1;
		worker_queues[i].queue_id = i;
		if (cdata.enable_queue_priorities) {
			/* calculate priority stepping for each stage, leaving
			 * headroom of 1 for the SINGLE_LINK TX below
			 */
			const uint32_t prio_delta =
				(RTE_EVENT_DEV_PRIORITY_LOWEST-1) / nb_queues;

			/* higher priority for queues closer to tx */
			wkr_q_conf.priority =
				RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta * i;
		}
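		/*
		 * Worked example: with num_stages = 4 (nb_queues = 5),
		 * prio_delta = 254 / 5 = 50, so stages 0..3 get priorities
		 * 255, 205, 155 and 105. Numerically lower values are
		 * higher priority, so stages nearer TX are serviced first.
		 */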
		const char *type_str = "Atomic";
		switch (wkr_q_conf.schedule_type) {
		case RTE_SCHED_TYPE_ORDERED:
			type_str = "Ordered";
			break;
		case RTE_SCHED_TYPE_PARALLEL:
			type_str = "Parallel";
			break;
		}
		printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
				wkr_q_conf.priority);
	}
	printf("\n");
	/* final queue for sending to TX core */
	if (rte_event_queue_setup(dev_id, i, &tx_q_conf) < 0) {
		printf("%d: error creating qid %d\n", __LINE__, i);
		return -1;
	}
	tx_queue.queue_id = i;
	tx_queue.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
	if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
		wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
	if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
		wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
	/* set up one port per worker, linking to all stage queues */
	for (i = 0; i < cdata.num_workers; i++) {
		struct worker_data *w = &worker_data[i];
		w->dev_id = dev_id;
		if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
			printf("Error setting up port %d\n", i);
			return -1;
		}

		uint32_t s;
		for (s = 0; s < cdata.num_stages; s++) {
			if (rte_event_port_link(dev_id, i,
					&worker_queues[s].queue_id,
					&worker_queues[s].priority,
					1) != 1) {
				printf("%d: error creating link for port %d\n",
						__LINE__, i);
				return -1;
			}
		}
		w->port_id = i;
	}
	if (tx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
		tx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
	if (tx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
		tx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
	/* port for consumer, linked to TX queue */
	if (rte_event_port_setup(dev_id, i, &tx_p_conf) < 0) {
		printf("Error setting up port %d\n", i);
		return -1;
	}
	if (rte_event_port_link(dev_id, i, &tx_queue.queue_id,
			&tx_queue.priority, 1) != 1) {
		printf("%d: error creating link for port %d\n",
				__LINE__, i);
		return -1;
	}
	/* port for producer, no links */
	struct rte_event_port_conf rx_p_conf = {
			.dequeue_depth = 8,
			.enqueue_depth = 8,
			.new_event_threshold = 1200,
			.disable_implicit_release = disable_implicit_release,
	};

	if (rx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
		rx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
	if (rx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
		rx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;

	if (rte_event_port_setup(dev_id, i + 1, &rx_p_conf) < 0) {
		printf("Error setting up port %d\n", i);
		return -1;
	}
	*prod_data = (struct prod_data){.dev_id = dev_id,
					.port_id = i + 1,
					.qid = cdata.qid[0] };
	*cons_data = (struct cons_data){.dev_id = dev_id,
					.port_id = i,
					.release = disable_implicit_release };
	ret = rte_event_dev_service_id_get(dev_id,
			&fdata->evdev_service_id);
	if (ret != -ESRCH && ret != 0) {
		printf("Error getting the service ID for sw eventdev\n");
		return -1;
	}
	rte_service_runstate_set(fdata->evdev_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
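	/*
	 * The sw eventdev does its scheduling work in a service; with the
	 * mapped-lcore check disabled, application cores can drive that
	 * service directly via rte_service_run_iter_on_app_lcore() in
	 * schedule_devices() instead of dedicating a service lcore.
	 */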
	if (rte_event_dev_start(dev_id) < 0) {
		printf("Error starting eventdev\n");
		return -1;
	}

	return dev_id;
}
static void
signal_handler(int signum)
{
	if (fdata->done)
		rte_exit(1, "Exiting on signal %d\n", signum);

	if (signum == SIGINT || signum == SIGTERM) {
		printf("\n\nSignal %d received, preparing to exit...\n",
				signum);
		fdata->done = 1;
	}
	if (signum == SIGTSTP)
		rte_event_dev_dump(0, stdout);
}
static inline uint64_t
port_stat(int dev_id, int32_t p)
{
	char statname[64];
	snprintf(statname, sizeof(statname), "port_%u_rx", p);
	return rte_event_dev_xstats_by_name_get(dev_id, statname, NULL);
}
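/*
 * port_stat() yields (uint64_t)-ENOTSUP when the PMD exposes no xstats;
 * main() checks for exactly that value before printing the per-worker
 * workload distribution.
 */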
int
main(int argc, char **argv)
{
	struct worker_data *worker_data;
	unsigned int num_ports;
	int lcore_id;
	int err;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);
	signal(SIGTSTP, signal_handler);

	err = rte_eal_init(argc, argv);
	if (err < 0)
		rte_panic("Invalid EAL arguments\n");

	argc -= err;
	argv += err;

	/* zeroed allocation: the role arrays and try-locks must start at 0 */
	fdata = rte_zmalloc(NULL, sizeof(struct fastpath_data), 0);
	if (fdata == NULL)
		rte_panic("Out of memory\n");
	/* Parse CLI options */
	parse_app_args(argc, argv);

	num_ports = rte_eth_dev_count();
	if (num_ports == 0)
		rte_panic("No ethernet ports found\n");

	const unsigned int cores_needed = cdata.active_cores;
	if (!cdata.quiet) {
		printf("  Config:\n");
		printf("\tports: %u\n", num_ports);
		printf("\tworkers: %u\n", cdata.num_workers);
		printf("\tpackets: %"PRIi64"\n", cdata.num_packets);
		printf("\tQueue-prio: %u\n", cdata.enable_queue_priorities);
		if (cdata.queue_type == RTE_SCHED_TYPE_ORDERED)
			printf("\tqid0 type: ordered\n");
		if (cdata.queue_type == RTE_SCHED_TYPE_ATOMIC)
			printf("\tqid0 type: atomic\n");
		printf("\tCores available: %u\n", rte_lcore_count());
		printf("\tCores used: %u\n", cores_needed);
	}
	if (rte_lcore_count() < cores_needed)
		rte_panic("Too few cores (%d < %d)\n", rte_lcore_count(),
				cores_needed);

	const unsigned int ndevs = rte_event_dev_count();
	if (ndevs == 0)
		rte_panic("No eventdevs found. Pass in a --vdev eventdev.\n");
	if (ndevs > 1)
		fprintf(stderr,
			"Warning: More than one eventdev, using idx 0\n");
	worker_data = rte_calloc(0, cdata.num_workers,
			sizeof(worker_data[0]), 0);
	if (worker_data == NULL)
		rte_panic("rte_calloc failed\n");

	int dev_id = setup_eventdev(&prod_data, &cons_data, worker_data);
	if (dev_id < 0)
		rte_exit(EXIT_FAILURE, "Error setting up eventdev\n");

	prod_data.num_nic_ports = num_ports;
	init_ports(num_ports);
	int worker_idx = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (lcore_id >= MAX_NUM_CORE)
			break;

		if (!fdata->rx_core[lcore_id] &&
			!fdata->worker_core[lcore_id] &&
			!fdata->tx_core[lcore_id] &&
			!fdata->sched_core[lcore_id])
			continue;

		if (fdata->rx_core[lcore_id])
			printf(
				"[%s()] lcore %d executing NIC Rx, and using eventdev port %u\n",
				__func__, lcore_id, prod_data.port_id);

		if (fdata->tx_core[lcore_id])
			printf(
				"[%s()] lcore %d executing NIC Tx, and using eventdev port %u\n",
				__func__, lcore_id, cons_data.port_id);

		if (fdata->sched_core[lcore_id])
			printf("[%s()] lcore %d executing scheduler\n",
					__func__, lcore_id);

		if (fdata->worker_core[lcore_id])
			printf(
				"[%s()] lcore %d executing worker, using eventdev port %u\n",
				__func__, lcore_id,
				worker_data[worker_idx].port_id);

		err = rte_eal_remote_launch(worker, &worker_data[worker_idx],
					    lcore_id);
		if (err)
			rte_panic("Failed to launch worker on core %d\n",
					lcore_id);

		if (fdata->worker_core[lcore_id])
			worker_idx++;
	}
	lcore_id = rte_lcore_id();

	if (core_in_use(lcore_id))
		worker(&worker_data[worker_idx++]);

	rte_eal_mp_wait_lcore();

	if (cdata.dump_dev)
		rte_event_dev_dump(dev_id, stdout);
	if (!cdata.quiet && (port_stat(dev_id, worker_data[0].port_id) !=
			(uint64_t)-ENOTSUP)) {
		printf("\nPort Workload distribution:\n");
		uint32_t i;
		uint64_t tot_pkts = 0;
		uint64_t pkts_per_wkr[RTE_MAX_LCORE] = {0};

		for (i = 0; i < cdata.num_workers; i++) {
			pkts_per_wkr[i] =
				port_stat(dev_id, worker_data[i].port_id);
			tot_pkts += pkts_per_wkr[i];
		}
		for (i = 0; i < cdata.num_workers; i++) {
			float pc = pkts_per_wkr[i] * 100 /
				((float)tot_pkts);
			printf("worker %i :\t%.1f %% (%"PRIu64" pkts)\n",
					i, pc, pkts_per_wkr[i]);
		}
	}

	return 0;
}