/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <getopt.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <signal.h>
#include <stdbool.h>
#include <errno.h>
#include <inttypes.h>

#include <rte_eal.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_launch.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_service.h>
#define MAX_NUM_STAGES 8
#define BATCH_SIZE 16	/* events/mbufs handled per burst */
#define MAX_NUM_CORE 64
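/*
 * Pipeline layout: one producer (NIC Rx) injects events, up to
 * MAX_NUM_STAGES worker stages forward them through load-balanced event
 * queues, and one consumer (NIC Tx) drains a final SINGLE_LINK queue.
 * Rx, Tx, scheduler and worker roles are assigned to lcores via the
 * coremask options parsed below.
 */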
struct prod_data {
	uint8_t dev_id;
	uint8_t port_id;
	int32_t qid;
	unsigned int num_nic_ports;
} __rte_cache_aligned;

struct cons_data {
	uint8_t dev_id;
	uint8_t port_id;
} __rte_cache_aligned;

static struct prod_data prod_data;
static struct cons_data cons_data;

struct worker_data {
	uint8_t dev_id;
	uint8_t port_id;
} __rte_cache_aligned;
struct fastpath_data {
	volatile int done;
	uint32_t rx_lock;
	uint32_t tx_lock;
	uint32_t sched_lock;
	uint32_t evdev_service_id;
	bool rx_single;
	bool tx_single;
	bool sched_single;
	unsigned int rx_core[MAX_NUM_CORE];
	unsigned int tx_core[MAX_NUM_CORE];
	unsigned int sched_core[MAX_NUM_CORE];
	unsigned int worker_core[MAX_NUM_CORE];
	struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;

static struct fastpath_data *fdata;
struct config_data {
	unsigned int active_cores;
	unsigned int num_workers;
	int64_t num_packets;
	unsigned int num_fids;
	int queue_type;
	int worker_cycles;
	int enable_queue_priorities;
	int quiet;
	int dump_dev;
	int dump_dev_signal;
	unsigned int num_stages;
	unsigned int worker_cq_depth;
	int16_t next_qid[MAX_NUM_STAGES+2];
	int16_t qid[MAX_NUM_STAGES];
};
static struct config_data cdata = {
	.num_packets = (1L << 25), /* do ~32M packets */
	.num_fids = 512,
	.queue_type = RTE_SCHED_TYPE_ATOMIC,
	.next_qid = {-1},
	.qid = {-1},
	.num_stages = 1,
	.worker_cq_depth = 16
	/* all defaults above can be overridden on the command line */
};
static bool
core_in_use(unsigned int lcore_id) {
	return (fdata->rx_core[lcore_id] || fdata->sched_core[lcore_id] ||
		fdata->tx_core[lcore_id] || fdata->worker_core[lcore_id]);
}
static void
eth_tx_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
			void *userdata)
{
	int port_id = (uintptr_t) userdata;
	unsigned int _sent = 0;

	do {
		/* Note: hard-coded TX queue */
		_sent += rte_eth_tx_burst(port_id, 0, &pkts[_sent],
					  unsent - _sent);
	} while (_sent != unsent);
}
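/*
 * The retry callback above is registered with each TX buffer (see
 * init_ports()): when rte_eth_tx_buffer_flush() cannot send every packet,
 * it spins on rte_eth_tx_burst() until the whole batch is on the wire
 * rather than dropping the remainder.
 */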
static int
consumer(void)
{
	const uint64_t freq_khz = rte_get_timer_hz() / 1000;
	struct rte_event packets[BATCH_SIZE];

	static uint64_t received;
	static uint64_t last_pkts;
	static uint64_t last_time;
	static uint64_t start_time;
	unsigned int i, j;
	uint8_t dev_id = cons_data.dev_id;
	uint8_t port_id = cons_data.port_id;

	uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
			packets, RTE_DIM(packets), 0);

	if (n == 0) {
		for (j = 0; j < rte_eth_dev_count(); j++)
			rte_eth_tx_buffer_flush(j, 0, fdata->tx_buf[j]);
		return 0;
	}
	if (start_time == 0)
		last_time = start_time = rte_get_timer_cycles();

	received += n;
	for (i = 0; i < n; i++) {
		uint8_t outport = packets[i].mbuf->port;
		rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
				packets[i].mbuf);
	}

	/* Print out mpps every 1<<22 packets */
	if (!cdata.quiet && received >= last_pkts + (1<<22)) {
		const uint64_t now = rte_get_timer_cycles();
		const uint64_t total_ms = (now - start_time) / freq_khz;
		const uint64_t delta_ms = (now - last_time) / freq_khz;
		uint64_t delta_pkts = received - last_pkts;

		printf("# consumer RX=%"PRIu64", time %"PRIu64 "ms, "
			"avg %.3f mpps [current %.3f mpps]\n",
				received,
				total_ms,
				received / (total_ms * 1000.0),
				delta_pkts / (delta_ms * 1000.0));
		last_pkts = received;
		last_time = now;
	}

	cdata.num_packets -= n;
	if (cdata.num_packets <= 0)
		fdata->done = 1;

	return 0;
}
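/*
 * Rate math used above: total_ms is wall-clock milliseconds, so
 * received / (total_ms * 1000.0) is packets per microsecond, i.e.
 * millions of packets per second (mpps). For example, 8.4M packets
 * over 2000 ms prints as 4.2 mpps.
 */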
static int
producer(void)
{
	static uint8_t eth_port;
	struct rte_mbuf *mbufs[BATCH_SIZE+2];
	struct rte_event ev[BATCH_SIZE+2];
	uint32_t i, num_ports = prod_data.num_nic_ports;
	int32_t qid = prod_data.qid;
	uint8_t dev_id = prod_data.dev_id;
	uint8_t port_id = prod_data.port_id;
	uint32_t prio_idx = 0;

	const uint16_t nb_rx = rte_eth_rx_burst(eth_port, 0, mbufs, BATCH_SIZE);
	if (++eth_port == num_ports)
		eth_port = 0;
	if (nb_rx == 0) {
		rte_pause();
		return 0;
	}

	for (i = 0; i < nb_rx; i++) {
		ev[i].flow_id = mbufs[i]->hash.rss;
		ev[i].op = RTE_EVENT_OP_NEW;
		ev[i].sched_type = cdata.queue_type;
		ev[i].queue_id = qid;
		ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
		ev[i].sub_event_type = 0;
		ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
		ev[i].mbuf = mbufs[i];
		RTE_SET_USED(prio_idx);
	}

	const int nb_tx = rte_event_enqueue_burst(dev_id, port_id, ev, nb_rx);
	if (nb_tx != nb_rx) {
		for (i = nb_tx; i < nb_rx; i++)
			rte_pktmbuf_free(mbufs[i]);
	}

	return 0;
}
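/*
 * The producer round-robins across NIC ports, polling one port per call,
 * and stamps each mbuf's RSS hash into ev.flow_id so the eventdev can
 * keep per-flow ordering. NEW events that fail to enqueue (back-pressure
 * once new_event_threshold is reached) are dropped and their mbufs freed.
 */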
static inline void
schedule_devices(unsigned int lcore_id)
{
	if (fdata->rx_core[lcore_id] && (fdata->rx_single ||
	    rte_atomic32_cmpset(&(fdata->rx_lock), 0, 1))) {
		producer();
		rte_atomic32_clear((rte_atomic32_t *)&(fdata->rx_lock));
	}

	if (fdata->sched_core[lcore_id] && (fdata->sched_single ||
	    rte_atomic32_cmpset(&(fdata->sched_lock), 0, 1))) {
		rte_service_run_iter_on_app_lcore(fdata->evdev_service_id, 1);
		if (cdata.dump_dev_signal) {
			rte_event_dev_dump(0, stdout);
			cdata.dump_dev_signal = 0;
		}
		rte_atomic32_clear((rte_atomic32_t *)&(fdata->sched_lock));
	}

	if (fdata->tx_core[lcore_id] && (fdata->tx_single ||
	    rte_atomic32_cmpset(&(fdata->tx_lock), 0, 1))) {
		consumer();
		rte_atomic32_clear((rte_atomic32_t *)&(fdata->tx_lock));
	}
}
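/*
 * Each role (Rx, sched, Tx) may be shared by several lcores. When exactly
 * one core owns a role, the *_single flag skips locking entirely;
 * otherwise rte_atomic32_cmpset() acts as a try-lock, so one of the
 * competing cores runs the role this iteration and the rest move on
 * without blocking.
 */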
static inline void
work(struct rte_mbuf *m)
{
	struct ether_hdr *eth;
	struct ether_addr addr;

	/* change mac addresses on packet (to use mbuf data) */
	/*
	 * FIXME Swap mac address properly and also handle the
	 * case for both odd and even number of stages that the
	 * addresses end up the same at the end of the pipeline
	 */
	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
	ether_addr_copy(&eth->d_addr, &addr);
	ether_addr_copy(&addr, &eth->d_addr);

	/* do a number of cycles of work per packet */
	volatile uint64_t start_tsc = rte_rdtsc();
	while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
		rte_pause();
}
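/*
 * work() is the synthetic per-packet cost: it touches the Ethernet header
 * (forcing the mbuf data into cache) and then busy-waits for
 * cdata.worker_cycles TSC ticks, so the -W option can emulate heavier
 * pipeline stages without changing the pipeline itself.
 */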
static int
worker(void *arg)
{
	struct rte_event events[BATCH_SIZE];

	struct worker_data *data = (struct worker_data *)arg;
	uint8_t dev_id = data->dev_id;
	uint8_t port_id = data->port_id;
	size_t sent = 0, received = 0;
	unsigned int lcore_id = rte_lcore_id();

	while (!fdata->done) {
		uint16_t i;

		schedule_devices(lcore_id);

		if (!fdata->worker_core[lcore_id]) {
			rte_pause();
			continue;
		}

		const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
				events, RTE_DIM(events), 0);

		if (nb_rx == 0) {
			rte_pause();
			continue;
		}
		received += nb_rx;

		for (i = 0; i < nb_rx; i++) {

			/* The first worker stage does classification */
			if (events[i].queue_id == cdata.qid[0])
				events[i].flow_id = events[i].mbuf->hash.rss
							% cdata.num_fids;

			events[i].queue_id = cdata.next_qid[events[i].queue_id];
			events[i].op = RTE_EVENT_OP_FORWARD;
			events[i].sched_type = cdata.queue_type;

			work(events[i].mbuf);
		}
		uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
				events, nb_rx);
		while (nb_tx < nb_rx && !fdata->done)
			nb_tx += rte_event_enqueue_burst(dev_id, port_id,
							events + nb_tx,
							nb_rx - nb_tx);
		sent += nb_tx;
	}

	if (!cdata.quiet)
		printf("  worker %u thread done. RX=%zu TX=%zu\n",
				rte_lcore_id(), received, sent);

	return 0;
}
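/*
 * Every launched lcore runs this loop. Non-worker cores still call
 * schedule_devices() so the Rx, scheduler, and Tx roles keep making
 * progress, then skip the event processing. Worker cores dequeue a burst,
 * re-key the flow on the first stage, retarget each event at the next
 * queue via cdata.next_qid[], and FORWARD it back to the eventdev,
 * retrying the enqueue until the burst drains.
 */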
/*
 * Parse the coremask given as argument (hexadecimal string) and fill
 * the global configuration (core role and core count) with the parsed
 * value.
 */
static int xdigit2val(unsigned char c)
{
	int val;

	if (isdigit(c))
		val = c - '0';
	else if (isupper(c))
		val = c - 'A' + 10;
	else
		val = c - 'a' + 10;
	return val;
}
static uint64_t
parse_coremask(const char *coremask)
{
	int i, j, idx = 0;
	unsigned int count = 0;
	char c;
	int val;
	uint64_t mask = 0;
	const int32_t BITS_HEX = 4;

	if (coremask == NULL)
		return -1;
	/* Skip leading and trailing blanks;
	 * strip an optional 0x/0X prefix.
	 */
	while (isblank(*coremask))
		coremask++;
	if (coremask[0] == '0' && ((coremask[1] == 'x')
		|| (coremask[1] == 'X')))
		coremask += 2;
	i = strlen(coremask);
	while ((i > 0) && isblank(coremask[i - 1]))
		i--;
	if (i == 0)
		return -1;

	for (i = i - 1; i >= 0 && idx < MAX_NUM_CORE; i--) {
		c = coremask[i];
		if (isxdigit(c) == 0) {
			/* invalid characters */
			return -1;
		}
		val = xdigit2val(c);
		for (j = 0; j < BITS_HEX && idx < MAX_NUM_CORE; j++, idx++) {
			if ((1 << j) & val) {
				mask |= (1UL << idx);
				count++;
			}
		}
	}
	for (; i >= 0; i--)
		if (coremask[i] != '0')
			return -1;
	if (count == 0)
		return -1;
	return mask;
}
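/*
 * Example: parse_coremask("0xf0") walks the hex digits right-to-left and
 * sets bits 4-7, i.e. lcores 4, 5, 6 and 7; "f0" and " 0xF0 " parse the
 * same way. Any digits beyond MAX_NUM_CORE (64) lcores must be '0', or
 * the mask is rejected with -1 (all-ones once widened to uint64_t).
 */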
static struct option long_options[] = {
	{"workers", required_argument, 0, 'w'},
	{"packets", required_argument, 0, 'n'},
	{"atomic-flows", required_argument, 0, 'f'},
	{"num_stages", required_argument, 0, 's'},
	{"rx-mask", required_argument, 0, 'r'},
	{"tx-mask", required_argument, 0, 't'},
	{"sched-mask", required_argument, 0, 'e'},
	{"cq-depth", required_argument, 0, 'c'},
	{"work-cycles", required_argument, 0, 'W'},
	{"queue-priority", no_argument, 0, 'P'},
	{"parallel", no_argument, 0, 'p'},
	{"ordered", no_argument, 0, 'o'},
	{"quiet", no_argument, 0, 'q'},
	{"dump", no_argument, 0, 'D'},
	{0, 0, 0, 0}
};
static void
usage(void)
{
	const char *usage_str =
		"  Usage: eventdev_demo [options]\n"
		"  Options:\n"
		"  -n, --packets=N              Send N packets (default ~32M), 0 implies no limit\n"
		"  -f, --atomic-flows=N         Use N random flows from 1 to N (default 16)\n"
		"  -s, --num_stages=N           Use N atomic stages (default 1)\n"
		"  -r, --rx-mask=core mask      Run NIC rx on CPUs in core mask\n"
		"  -w, --worker-mask=core mask  Run worker on CPUs in core mask\n"
		"  -t, --tx-mask=core mask      Run NIC tx on CPUs in core mask\n"
		"  -e, --sched-mask=core mask   Run scheduler on CPUs in core mask\n"
		"  -c, --cq-depth=N             Worker CQ depth (default 16)\n"
		"  -W, --work-cycles=N          Worker cycles (default 0)\n"
		"  -P, --queue-priority         Enable scheduler queue prioritization\n"
		"  -o, --ordered                Use ordered scheduling\n"
		"  -p, --parallel               Use parallel scheduling\n"
		"  -q, --quiet                  Minimize printed output\n"
		"  -D, --dump                   Print detailed statistics before exit"
		"\n";
	fprintf(stderr, "%s", usage_str);
	exit(1);
}
static void
parse_app_args(int argc, char **argv)
{
	/* Parse cli options */
	int option_index;
	int c;
	opterr = 0;
	uint64_t rx_lcore_mask = 0;
	uint64_t tx_lcore_mask = 0;
	uint64_t sched_lcore_mask = 0;
	uint64_t worker_lcore_mask = 0;
	int i;

	for (;;) {
		c = getopt_long(argc, argv, "r:t:e:c:w:n:f:s:poPqDW:",
				long_options, &option_index);
		if (c == -1)
			break;

		int popcnt = 0;
		switch (c) {
		case 'n':
			cdata.num_packets = (int64_t)atol(optarg);
			if (cdata.num_packets == 0)
				cdata.num_packets = INT64_MAX;
			break;
		case 'f':
			cdata.num_fids = (unsigned int)atoi(optarg);
			break;
		case 's':
			cdata.num_stages = (unsigned int)atoi(optarg);
			break;
		case 'c':
			cdata.worker_cq_depth = (unsigned int)atoi(optarg);
			break;
		case 'W':
			cdata.worker_cycles = (unsigned int)atoi(optarg);
			break;
		case 'P':
			cdata.enable_queue_priorities = 1;
			break;
		case 'o':
			cdata.queue_type = RTE_SCHED_TYPE_ORDERED;
			break;
		case 'p':
			cdata.queue_type = RTE_SCHED_TYPE_PARALLEL;
			break;
		case 'q':
			cdata.quiet = 1;
			break;
		case 'D':
			cdata.dump_dev = 1;
			break;
		case 'w':
			worker_lcore_mask = parse_coremask(optarg);
			break;
		case 'r':
			rx_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(rx_lcore_mask);
			fdata->rx_single = (popcnt == 1);
			break;
		case 't':
			tx_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(tx_lcore_mask);
			fdata->tx_single = (popcnt == 1);
			break;
		case 'e':
			sched_lcore_mask = parse_coremask(optarg);
			popcnt = __builtin_popcountll(sched_lcore_mask);
			fdata->sched_single = (popcnt == 1);
			break;
		default:
			usage();
		}
	}

	if (worker_lcore_mask == 0 || rx_lcore_mask == 0 ||
	    sched_lcore_mask == 0 || tx_lcore_mask == 0) {
		printf("Core part of pipeline was not assigned any cores. "
			"This will stall the pipeline, please check core masks "
			"(use -h for details on setting core masks):\n"
			"\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
			"\n\tworkers: %"PRIu64"\n",
			rx_lcore_mask, tx_lcore_mask, sched_lcore_mask,
			worker_lcore_mask);
		rte_exit(-1, "Fix core masks\n");
	}

	if (cdata.num_stages == 0 || cdata.num_stages > MAX_NUM_STAGES)
		usage();

	for (i = 0; i < MAX_NUM_CORE; i++) {
		fdata->rx_core[i] = !!(rx_lcore_mask & (1UL << i));
		fdata->tx_core[i] = !!(tx_lcore_mask & (1UL << i));
		fdata->sched_core[i] = !!(sched_lcore_mask & (1UL << i));
		fdata->worker_core[i] = !!(worker_lcore_mask & (1UL << i));

		if (fdata->worker_core[i])
			cdata.num_workers++;
		if (core_in_use(i))
			cdata.active_cores++;
	}
}
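/*
 * Example: "-r1 -t2 -e4 -w f0" puts NIC Rx on lcore 0, NIC Tx on lcore 1,
 * the scheduler on lcore 2, and workers on lcores 4-7. Each single-bit
 * mask also sets the corresponding *_single flag, enabling the lock-free
 * path in schedule_devices().
 */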
/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	static const struct rte_eth_conf port_conf_default = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
			.max_rx_pkt_len = ETHER_MAX_LEN,
			.ignore_offload_bitfield = 1,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_hf = ETH_RSS_IP |
					  ETH_RSS_TCP |
					  ETH_RSS_UDP,
			}
		}
	};
	const uint16_t rx_rings = 1, tx_rings = 1;
	const uint16_t rx_ring_size = 512, tx_ring_size = 512;
	struct rte_eth_conf port_conf = port_conf_default;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	if (port >= rte_eth_dev_count())
		return -1;

	rte_eth_dev_info_get(port, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Allocate and set up 1 RX queue per Ethernet port. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
				rte_eth_dev_socket_id(port), NULL, mbuf_pool);
		if (retval < 0)
			return retval;
	}

	txconf = dev_info.default_txconf;
	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	txconf.offloads = port_conf_default.txmode.offloads;
	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
				rte_eth_dev_socket_id(port), &txconf);
		if (retval < 0)
			return retval;
	}

	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Display the port MAC address. */
	struct ether_addr addr;
	rte_eth_macaddr_get(port, &addr);
	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			(unsigned int)port,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	/* Enable RX in promiscuous mode for the Ethernet device. */
	rte_eth_promiscuous_enable(port);

	return 0;
}
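/*
 * RSS is enabled above so that mbuf->hash.rss is populated on receive;
 * the producer and the first worker stage both derive the event flow_id
 * from that hash, which is what spreads flows across workers while
 * preserving per-flow ordering on atomic queues.
 */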
static int
init_ports(unsigned int num_ports)
{
	uint8_t portid;
	unsigned int i;

	struct rte_mempool *mp = rte_pktmbuf_pool_create("packet_pool",
			/* mbufs */ 16384 * num_ports,
			/* cache_size */ 512,
			/* priv_size */ 0,
			/* data_room_size */ RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());

	for (portid = 0; portid < num_ports; portid++)
		if (port_init(portid, mp) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
					portid);

	for (i = 0; i < num_ports; i++) {
		void *userdata = (void *)(uintptr_t) i;
		fdata->tx_buf[i] =
			rte_malloc(NULL, RTE_ETH_TX_BUFFER_SIZE(32), 0);
		if (fdata->tx_buf[i] == NULL)
			rte_panic("Out of memory\n");
		rte_eth_tx_buffer_init(fdata->tx_buf[i], 32);
		rte_eth_tx_buffer_set_err_callback(fdata->tx_buf[i],
				eth_tx_buffer_retry,
				userdata);
	}

	return 0;
}
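/*
 * Mempool sizing: 16384 mbufs per port with a 512-entry per-lcore cache.
 * The single pool backs every RX queue, so it must cover NIC descriptors,
 * events in flight inside the eventdev, and packets parked in the
 * 32-entry TX buffers above.
 */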
struct port_link {
	uint8_t queue_id;
	uint8_t priority;
};

static int
setup_eventdev(struct prod_data *prod_data,
		struct cons_data *cons_data,
		struct worker_data *worker_data)
{
	const uint8_t dev_id = 0;
	/* +1 stages is for a SINGLE_LINK TX stage */
	const uint8_t nb_queues = cdata.num_stages + 1;
	/* + 2 is one port for producer and one for consumer */
	const uint8_t nb_ports = cdata.num_workers + 2;
	struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_events_limit = 4096,
			.nb_event_queue_flows = 1024,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};
	struct rte_event_port_conf wkr_p_conf = {
			.dequeue_depth = cdata.worker_cq_depth,
			.enqueue_depth = 64, /* assumed value; clamped to the device max below */
			.new_event_threshold = 4096,
	};
	struct rte_event_queue_conf wkr_q_conf = {
			.schedule_type = cdata.queue_type,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
	};
	struct rte_event_port_conf tx_p_conf = {
			.dequeue_depth = 128,
			.enqueue_depth = 128,
			.new_event_threshold = 4096,
	};
	const struct rte_event_queue_conf tx_q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
	};

	struct port_link worker_queues[MAX_NUM_STAGES];
	struct port_link tx_queue;
	unsigned int i;
	int ret, ndev = rte_event_dev_count();
	if (ndev < 1) {
		printf("%d: No Eventdev Devices Found\n", __LINE__);
		return -1;
	}

	struct rte_event_dev_info dev_info;
	ret = rte_event_dev_info_get(dev_id, &dev_info);
	printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);

	if (dev_info.max_event_port_dequeue_depth <
			config.nb_event_port_dequeue_depth)
		config.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;
	if (dev_info.max_event_port_enqueue_depth <
			config.nb_event_port_enqueue_depth)
		config.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;

	ret = rte_event_dev_configure(dev_id, &config);
	if (ret < 0) {
		printf("%d: Error configuring device\n", __LINE__);
		return -1;
	}
	/* Q creation - one load balanced per pipeline stage */
	printf("  Stages:\n");
	for (i = 0; i < cdata.num_stages; i++) {
		if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
			printf("%d: error creating qid %d\n", __LINE__, i);
			return -1;
		}
		cdata.qid[i] = i;
		cdata.next_qid[i] = i+1;
		worker_queues[i].queue_id = i;
		/* link service priority; NORMAL treats all stage queues
		 * equally (the queue priority below is a separate knob)
		 */
		worker_queues[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
		if (cdata.enable_queue_priorities) {
			/* calculate priority stepping for each stage, leaving
			 * headroom of 1 for the SINGLE_LINK TX below
			 */
			const uint32_t prio_delta =
				(RTE_EVENT_DEV_PRIORITY_LOWEST-1) / nb_queues;

			/* higher priority for queues closer to tx */
			wkr_q_conf.priority =
				RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta * i;
		}

		const char *type_str = "Atomic";
		switch (wkr_q_conf.schedule_type) {
		case RTE_SCHED_TYPE_ORDERED:
			type_str = "Ordered";
			break;
		case RTE_SCHED_TYPE_PARALLEL:
			type_str = "Parallel";
			break;
		}
		printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
				wkr_q_conf.priority);
	}
	printf("\n");

	/* final queue for sending to TX core */
	if (rte_event_queue_setup(dev_id, i, &tx_q_conf) < 0) {
		printf("%d: error creating qid %d\n", __LINE__, i);
		return -1;
	}
	tx_queue.queue_id = i;
	tx_queue.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
	if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
		wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
	if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
		wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;

	/* set up one port per worker, linking to all stage queues */
	for (i = 0; i < cdata.num_workers; i++) {
		struct worker_data *w = &worker_data[i];
		w->dev_id = dev_id;
		if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
			printf("Error setting up port %d\n", i);
			return -1;
		}

		uint32_t s;
		for (s = 0; s < cdata.num_stages; s++) {
			if (rte_event_port_link(dev_id, i,
					&worker_queues[s].queue_id,
					&worker_queues[s].priority,
					1) != 1) {
				printf("%d: error creating link for port %d\n",
						__LINE__, i);
				return -1;
			}
		}
		w->port_id = i;
	}
	if (tx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
		tx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
	if (tx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
		tx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;

	/* port for consumer, linked to TX queue */
	if (rte_event_port_setup(dev_id, i, &tx_p_conf) < 0) {
		printf("Error setting up port %d\n", i);
		return -1;
	}
	if (rte_event_port_link(dev_id, i, &tx_queue.queue_id,
				&tx_queue.priority, 1) != 1) {
		printf("%d: error creating link for port %d\n",
				__LINE__, i);
		return -1;
	}
	/* port for producer, no links */
	struct rte_event_port_conf rx_p_conf = {
			.dequeue_depth = 8,  /* assumed; producer only enqueues */
			.enqueue_depth = 8,  /* assumed; clamped below */
			.new_event_threshold = 1200,
	};

	if (rx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
		rx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
	if (rx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
		rx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;

	if (rte_event_port_setup(dev_id, i + 1, &rx_p_conf) < 0) {
		printf("Error setting up port %d\n", i);
		return -1;
	}

	*prod_data = (struct prod_data){.dev_id = dev_id,
					.port_id = i + 1,
					.qid = cdata.qid[0] };
	*cons_data = (struct cons_data){.dev_id = dev_id,
					.port_id = i };

	ret = rte_event_dev_service_id_get(dev_id,
			&fdata->evdev_service_id);
	if (ret != -ESRCH && ret != 0) {
		printf("Error getting the service ID for sw eventdev\n");
		return -1;
	}
	rte_service_runstate_set(fdata->evdev_service_id, 1);
	rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
	if (rte_event_dev_start(dev_id) < 0) {
		printf("Error starting eventdev\n");
		return -1;
	}

	return dev_id;
}
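/*
 * Resulting topology for s stages and w workers:
 *   queues 0..s-1 - load-balanced stage queues, linked to every worker
 *   queue  s      - SINGLE_LINK TX queue, linked only to the consumer
 *   ports  0..w-1 - workers; port w - consumer; port w+1 - producer
 * The producer port has no links because it only injects NEW events.
 */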
static void
signal_handler(int signum)
{
	if (fdata->done)
		rte_exit(1, "Exiting on signal %d\n", signum);
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\n\nSignal %d received, preparing to exit...\n",
				signum);
		fdata->done = 1;
	}
	if (signum == SIGTSTP)
		rte_event_dev_dump(0, stdout);
}
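/*
 * The first SIGINT/SIGTERM requests a clean shutdown by setting
 * fdata->done, which every loop polls; a second signal, arriving while
 * done is already set, aborts immediately via rte_exit(). SIGTSTP is
 * repurposed to dump eventdev state without stopping the run.
 */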
static inline uint64_t
port_stat(int dev_id, int32_t p)
{
	char statname[64];
	snprintf(statname, sizeof(statname), "port_%u_rx", (unsigned int)p);
	return rte_event_dev_xstats_by_name_get(dev_id, statname, NULL);
}
int
main(int argc, char **argv)
{
	struct worker_data *worker_data;
	unsigned int num_ports;
	int lcore_id;
	int err;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);
	signal(SIGTSTP, signal_handler);

	err = rte_eal_init(argc, argv);
	if (err < 0)
		rte_panic("Invalid EAL arguments\n");

	argc -= err;
	argv += err;

	/* zeroed allocation so lock words and role tables start clear */
	fdata = rte_zmalloc(NULL, sizeof(struct fastpath_data), 0);
	if (fdata == NULL)
		rte_panic("Out of memory\n");

	/* Parse cli options */
	parse_app_args(argc, argv);

	num_ports = rte_eth_dev_count();
	if (num_ports == 0)
		rte_panic("No ethernet ports found\n");
	const unsigned int cores_needed = cdata.active_cores;

	if (!cdata.quiet) {
		printf("  Config:\n");
		printf("\tports: %u\n", num_ports);
		printf("\tworkers: %u\n", cdata.num_workers);
		printf("\tpackets: %"PRIi64"\n", cdata.num_packets);
		printf("\tQueue-prio: %u\n", cdata.enable_queue_priorities);
		if (cdata.queue_type == RTE_SCHED_TYPE_ORDERED)
			printf("\tqid0 type: ordered\n");
		if (cdata.queue_type == RTE_SCHED_TYPE_ATOMIC)
			printf("\tqid0 type: atomic\n");
		printf("\tCores available: %u\n", rte_lcore_count());
		printf("\tCores used: %u\n", cores_needed);
	}

	if (rte_lcore_count() < cores_needed)
		rte_panic("Too few cores (%d < %d)\n", rte_lcore_count(),
				cores_needed);

	const unsigned int ndevs = rte_event_dev_count();
	if (ndevs == 0)
		rte_panic("No dev_id devs found. Pass in a --vdev eventdev.\n");
	if (ndevs > 1)
		fprintf(stderr,
			"Warning: More than one eventdev, using idx 0\n");
	worker_data = rte_calloc(0, cdata.num_workers,
			sizeof(worker_data[0]), 0);
	if (worker_data == NULL)
		rte_panic("rte_calloc failed\n");

	int dev_id = setup_eventdev(&prod_data, &cons_data, worker_data);
	if (dev_id < 0)
		rte_exit(EXIT_FAILURE, "Error setting up eventdev\n");

	prod_data.num_nic_ports = num_ports;
	init_ports(num_ports);
	int worker_idx = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (lcore_id >= MAX_NUM_CORE)
			break;

		if (!fdata->rx_core[lcore_id] &&
				!fdata->worker_core[lcore_id] &&
				!fdata->tx_core[lcore_id] &&
				!fdata->sched_core[lcore_id])
			continue;

		if (fdata->rx_core[lcore_id])
			printf(
				"[%s()] lcore %d executing NIC Rx, and using eventdev port %u\n",
				__func__, lcore_id, prod_data.port_id);

		if (fdata->tx_core[lcore_id])
			printf(
				"[%s()] lcore %d executing NIC Tx, and using eventdev port %u\n",
				__func__, lcore_id, cons_data.port_id);

		if (fdata->sched_core[lcore_id])
			printf("[%s()] lcore %d executing scheduler\n",
					__func__, lcore_id);

		if (fdata->worker_core[lcore_id])
			printf(
				"[%s()] lcore %d executing worker, using eventdev port %u\n",
				__func__, lcore_id,
				worker_data[worker_idx].port_id);

		err = rte_eal_remote_launch(worker, &worker_data[worker_idx],
					    lcore_id);
		if (err)
			rte_panic("Failed to launch worker on core %d\n",
					lcore_id);

		/* only worker cores consume a worker_data slot */
		if (fdata->worker_core[lcore_id])
			worker_idx++;
	}

	lcore_id = rte_lcore_id();

	if (core_in_use(lcore_id))
		worker(&worker_data[worker_idx++]);

	rte_eal_mp_wait_lcore();
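	/*
	 * Execution reaches this point only after fdata->done is set:
	 * each launched lcore returns from worker() and
	 * rte_eal_mp_wait_lcore() reaps them before the final statistics
	 * are printed below.
	 */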
	if (cdata.dump_dev)
		rte_event_dev_dump(dev_id, stdout);

	if (!cdata.quiet && (port_stat(dev_id, worker_data[0].port_id) !=
			(uint64_t)-ENOTSUP)) {
		printf("\nPort Workload distribution:\n");
		uint32_t i;
		uint64_t tot_pkts = 0;
		uint64_t pkts_per_wkr[RTE_MAX_LCORE] = {0};
		for (i = 0; i < cdata.num_workers; i++) {
			pkts_per_wkr[i] =
				port_stat(dev_id, worker_data[i].port_id);
			tot_pkts += pkts_per_wkr[i];
		}
		for (i = 0; i < cdata.num_workers; i++) {
			/* guard against a zero total */
			float pc = tot_pkts ?
				pkts_per_wkr[i] * 100.0f / tot_pkts : 0.0f;
			printf("worker %i :\t%.1f %% (%"PRIu64" pkts)\n",
					i, pc, pkts_per_wkr[i]);
		}
	}

	return 0;
}
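/*
 * Example invocation (illustrative; the binary and vdev names depend on
 * the build and on which eventdev PMD is in use):
 *
 *   ./eventdev_pipeline_sw_pmd --vdev event_sw0 -- \
 *	-r1 -t1 -e4 -w FF00 -s4 -n0
 *
 * lcore 0 does both NIC Rx and Tx, lcore 2 runs the scheduler, lcores
 * 8-15 are workers, the pipeline has 4 atomic stages, and -n0 removes
 * the packet limit so the run continues until interrupted.
 */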