1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
11 #include "pipeline_common.h"
13 struct config_data cdata = {
14 .num_packets = (1L << 25), /* do ~32M packets */
16 .queue_type = RTE_SCHED_TYPE_ATOMIC,
24 core_in_use(unsigned int lcore_id) {
25 return (fdata->rx_core[lcore_id] || fdata->sched_core[lcore_id] ||
26 fdata->tx_core[lcore_id] || fdata->worker_core[lcore_id]);
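/*
 * TX buffer error callback: retry sending the unsent mbufs on the
 * (hard-coded) TX queue 0 until every one of them has been accepted.
 */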
30 eth_tx_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
33 int port_id = (uintptr_t) userdata;
34 unsigned int _sent = 0;
37 /* Note: hard-coded TX queue */
38 _sent += rte_eth_tx_burst(port_id, 0, &pkts[_sent],
40 } while (_sent != unsent);
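/*
 * Consumer: dequeues processed events from its event port, buffers each
 * mbuf for transmit on the port recorded in the mbuf, and periodically
 * prints throughput statistics.
 */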
46 const uint64_t freq_khz = rte_get_timer_hz() / 1000;
47 struct rte_event packets[BATCH_SIZE];
49 static uint64_t received;
50 static uint64_t last_pkts;
51 static uint64_t last_time;
52 static uint64_t start_time;
54 uint8_t dev_id = cons_data.dev_id;
55 uint8_t port_id = cons_data.port_id;
57 uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
58 packets, RTE_DIM(packets), 0);
61 for (j = 0; j < rte_eth_dev_count(); j++)
62 rte_eth_tx_buffer_flush(j, 0, fdata->tx_buf[j]);
66 last_time = start_time = rte_get_timer_cycles();
69 for (i = 0; i < n; i++) {
70 uint8_t outport = packets[i].mbuf->port;
71 rte_eth_tx_buffer(outport, 0, fdata->tx_buf[outport],
74 packets[i].op = RTE_EVENT_OP_RELEASE;
77 if (cons_data.release) {
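/* Implicit release is disabled on this port, so explicitly enqueue
 * RELEASE operations for the events just handled.
 */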
80 nb_tx = rte_event_enqueue_burst(dev_id, port_id, packets, n);
82 nb_tx += rte_event_enqueue_burst(dev_id, port_id,
87 /* Print out mpps every 1<<22 packets */
88 if (!cdata.quiet && received >= last_pkts + (1<<22)) {
89 const uint64_t now = rte_get_timer_cycles();
90 const uint64_t total_ms = (now - start_time) / freq_khz;
91 const uint64_t delta_ms = (now - last_time) / freq_khz;
92 uint64_t delta_pkts = received - last_pkts;
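/* pkts / (ms * 1000) == millions of packets per second */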
94 printf("# consumer RX=%"PRIu64", time %"PRIu64 "ms, "
95 "avg %.3f mpps [current %.3f mpps]\n",
98 received / (total_ms * 1000.0),
99 delta_pkts / (delta_ms * 1000.0));
100 last_pkts = received;
104 cdata.num_packets -= n;
105 if (cdata.num_packets <= 0)
112 schedule_devices(unsigned int lcore_id)
114 if (fdata->rx_core[lcore_id]) {
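/* The Rx adapter runs as a service; drive one iteration of it here. */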
115 rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id,
119 if (fdata->sched_core[lcore_id]) {
120 rte_service_run_iter_on_app_lcore(fdata->evdev_service_id,
121 !fdata->sched_single);
122 if (cdata.dump_dev_signal) {
123 rte_event_dev_dump(0, stdout);
124 cdata.dump_dev_signal = 0;
128 if (fdata->tx_core[lcore_id] && (fdata->tx_single ||
129 rte_atomic32_cmpset(&(fdata->tx_lock), 0, 1))) {
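/* Serialize the TX path: unless a single dedicated TX core is used,
 * only the lcore holding tx_lock may drive it.
 */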
131 rte_atomic32_clear((rte_atomic32_t *)&(fdata->tx_lock));
136 work(struct rte_mbuf *m)
138 struct ether_hdr *eth;
139 struct ether_addr addr;
141 /* change mac addresses on packet (to use mbuf data) */
143 * FIXME: Swap the MAC addresses properly, and handle both odd and
144 * even numbers of stages so that the addresses end up the same
145 * at the end of the pipeline
147 eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
148 ether_addr_copy(ð->d_addr, &addr);
149 ether_addr_copy(&addr, ð->d_addr);
151 /* do a number of cycles of work per packet */
152 volatile uint64_t start_tsc = rte_rdtsc();
153 while (rte_rdtsc() < start_tsc + cdata.worker_cycles)
160 struct rte_event events[BATCH_SIZE];
162 struct worker_data *data = (struct worker_data *)arg;
163 uint8_t dev_id = data->dev_id;
164 uint8_t port_id = data->port_id;
165 size_t sent = 0, received = 0;
166 unsigned int lcore_id = rte_lcore_id();
168 while (!fdata->done) {
171 schedule_devices(lcore_id);
173 if (!fdata->worker_core[lcore_id]) {
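/* This lcore has no worker role; it only drives the services above. */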
178 const uint16_t nb_rx = rte_event_dequeue_burst(dev_id, port_id,
179 events, RTE_DIM(events), 0);
187 for (i = 0; i < nb_rx; i++) {
189 /* The first worker stage does classification */
190 if (events[i].queue_id == cdata.qid[0])
191 events[i].flow_id = events[i].mbuf->hash.rss
194 events[i].queue_id = cdata.next_qid[events[i].queue_id];
195 events[i].op = RTE_EVENT_OP_FORWARD;
196 events[i].sched_type = cdata.queue_type;
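/* Simulate per-stage work: touch the MAC header and optionally
 * busy-wait for worker_cycles TSC cycles (see work() above).
 */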
198 work(events[i].mbuf);
200 uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id,
202 while (nb_tx < nb_rx && !fdata->done)
203 nb_tx += rte_event_enqueue_burst(dev_id, port_id,
210 printf(" worker %u thread done. RX=%zu TX=%zu\n",
211 rte_lcore_id(), received, sent);
217 * Parse the coremask given as argument (hexadecimal string) and fill
218 * the global configuration (core role and core count) with the parsed
221 static int xdigit2val(unsigned char c)
235 parse_coremask(const char *coremask)
238 unsigned int count = 0;
242 const int32_t BITS_HEX = 4;
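/* Example: "0xf0" yields a mask with lcores 4-7 set. */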
244 if (coremask == NULL)
246 /* Remove leading and trailing blank characters.
247 * Remove the 0x/0X prefix if it exists.
249 while (isblank(*coremask))
251 if (coremask[0] == '0' && ((coremask[1] == 'x')
252 || (coremask[1] == 'X')))
254 i = strlen(coremask);
255 while ((i > 0) && isblank(coremask[i - 1]))
260 for (i = i - 1; i >= 0 && idx < MAX_NUM_CORE; i--) {
262 if (isxdigit(c) == 0) {
263 /* invalid characters */
267 for (j = 0; j < BITS_HEX && idx < MAX_NUM_CORE; j++, idx++) {
268 if ((1 << j) & val) {
269 mask |= (1UL << idx);
275 if (coremask[i] != '0')
282 static struct option long_options[] = {
283 {"workers", required_argument, 0, 'w'},
284 {"packets", required_argument, 0, 'n'},
285 {"atomic-flows", required_argument, 0, 'f'},
286 {"num_stages", required_argument, 0, 's'},
287 {"rx-mask", required_argument, 0, 'r'},
288 {"tx-mask", required_argument, 0, 't'},
289 {"sched-mask", required_argument, 0, 'e'},
290 {"cq-depth", required_argument, 0, 'c'},
291 {"work-cycles", required_argument, 0, 'W'},
292 {"queue-priority", no_argument, 0, 'P'},
293 {"parallel", no_argument, 0, 'p'},
294 {"ordered", no_argument, 0, 'o'},
295 {"quiet", no_argument, 0, 'q'},
296 {"dump", no_argument, 0, 'D'},
303 const char *usage_str =
304 " Usage: eventdev_demo [options]\n"
306 " -n, --packets=N Send N packets (default ~32M), 0 implies no limit\n"
307 " -f, --atomic-flows=N Use N random flows from 1 to N (default 16)\n"
308 " -s, --num_stages=N Use N atomic stages (default 1)\n"
309 " -r, --rx-mask=core mask Run NIC rx on CPUs in core mask\n"
310 " -w, --worker-mask=core mask Run worker on CPUs in core mask\n"
311 " -t, --tx-mask=core mask Run NIC tx on CPUs in core mask\n"
312 " -e --sched-mask=core mask Run scheduler on CPUs in core mask\n"
313 " -c --cq-depth=N Worker CQ depth (default 16)\n"
314 " -W --work-cycles=N Worker cycles (default 0)\n"
315 " -P --queue-priority Enable scheduler queue prioritization\n"
316 " -o, --ordered Use ordered scheduling\n"
317 " -p, --parallel Use parallel scheduling\n"
318 " -q, --quiet Minimize printed output\n"
319 " -D, --dump Print detailed statistics before exit"
321 fprintf(stderr, "%s", usage_str);
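/*
 * Illustrative invocation (assumes the software eventdev PMD; EAL options
 * come before the "--" separator, application options after it):
 *   ./eventdev_demo --vdev event_sw0 -- -r1 -t1 -e4 -w F0 -s3 -n0
 */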
326 parse_app_args(int argc, char **argv)
328 /* Parse cli options */
332 uint64_t rx_lcore_mask = 0;
333 uint64_t tx_lcore_mask = 0;
334 uint64_t sched_lcore_mask = 0;
335 uint64_t worker_lcore_mask = 0;
339 c = getopt_long(argc, argv, "r:t:e:c:w:n:f:s:poPqDW:",
340 long_options, &option_index);
347 cdata.num_packets = (int64_t)atol(optarg);
348 if (cdata.num_packets == 0)
349 cdata.num_packets = INT64_MAX;
352 cdata.num_fids = (unsigned int)atoi(optarg);
355 cdata.num_stages = (unsigned int)atoi(optarg);
358 cdata.worker_cq_depth = (unsigned int)atoi(optarg);
361 cdata.worker_cycles = (unsigned int)atoi(optarg);
364 cdata.enable_queue_priorities = 1;
367 cdata.queue_type = RTE_SCHED_TYPE_ORDERED;
370 cdata.queue_type = RTE_SCHED_TYPE_PARALLEL;
379 worker_lcore_mask = parse_coremask(optarg);
382 rx_lcore_mask = parse_coremask(optarg);
383 popcnt = __builtin_popcountll(rx_lcore_mask);
384 fdata->rx_single = (popcnt == 1);
387 tx_lcore_mask = parse_coremask(optarg);
388 popcnt = __builtin_popcountll(tx_lcore_mask);
389 fdata->tx_single = (popcnt == 1);
392 sched_lcore_mask = parse_coremask(optarg);
393 popcnt = __builtin_popcountll(sched_lcore_mask);
394 fdata->sched_single = (popcnt == 1);
401 if (worker_lcore_mask == 0 || rx_lcore_mask == 0 ||
402 sched_lcore_mask == 0 || tx_lcore_mask == 0) {
403 printf("Core part of pipeline was not assigned any cores. "
404 "This will stall the pipeline, please check core masks "
405 "(use -h for details on setting core masks):\n"
406 "\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
407 "\n\tworkers: %"PRIu64"\n",
408 rx_lcore_mask, tx_lcore_mask, sched_lcore_mask,
410 rte_exit(-1, "Fix core masks\n");
412 if (cdata.num_stages == 0 || cdata.num_stages > MAX_NUM_STAGES)
415 for (i = 0; i < MAX_NUM_CORE; i++) {
416 fdata->rx_core[i] = !!(rx_lcore_mask & (1UL << i));
417 fdata->tx_core[i] = !!(tx_lcore_mask & (1UL << i));
418 fdata->sched_core[i] = !!(sched_lcore_mask & (1UL << i));
419 fdata->worker_core[i] = !!(worker_lcore_mask & (1UL << i));
421 if (fdata->worker_core[i])
424 cdata.active_cores++;
429 init_rx_adapter(uint16_t nb_ports)
433 uint8_t evdev_id = 0;
434 struct rte_event_dev_info dev_info;
436 ret = rte_event_dev_info_get(evdev_id, &dev_info);
438 struct rte_event_port_conf rx_p_conf = {
441 .new_event_threshold = 1200,
444 if (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
445 rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
446 if (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
447 rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
449 ret = rte_event_eth_rx_adapter_create(cdata.rx_adapter_id, evdev_id,
452 rte_exit(EXIT_FAILURE, "failed to create rx adapter[%d]",
453 cdata.rx_adapter_id);
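/* Feed packets from every ethdev port into the first pipeline stage
 * (qid[0]) using the configured scheduling type.
 */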
455 struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
456 .ev.sched_type = cdata.queue_type,
457 .ev.queue_id = cdata.qid[0],
460 for (i = 0; i < nb_ports; i++) {
463 ret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);
465 rte_exit(EXIT_FAILURE,
466 "failed to get event rx adapter "
469 ret = rte_event_eth_rx_adapter_queue_add(cdata.rx_adapter_id, i,
472 rte_exit(EXIT_FAILURE,
473 "Failed to add queues to Rx adapter");
476 ret = rte_event_eth_rx_adapter_service_id_get(cdata.rx_adapter_id,
477 &fdata->rxadptr_service_id);
478 if (ret != -ESRCH && ret != 0) {
479 rte_exit(EXIT_FAILURE,
480 "Error getting the service ID for Rx adapter\n");
482 rte_service_runstate_set(fdata->rxadptr_service_id, 1);
483 rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id, 0);
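/* The adapter's service is driven from schedule_devices() on the Rx
 * cores, so the mapped-lcore check is disabled.
 */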
485 ret = rte_event_eth_rx_adapter_start(cdata.rx_adapter_id);
487 rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
488 cdata.rx_adapter_id);
493 * Initializes a given port using global settings and with the RX buffers
494 * coming from the mbuf_pool passed as a parameter.
497 port_init(uint8_t port, struct rte_mempool *mbuf_pool)
499 static const struct rte_eth_conf port_conf_default = {
501 .mq_mode = ETH_MQ_RX_RSS,
502 .max_rx_pkt_len = ETHER_MAX_LEN,
503 .ignore_offload_bitfield = 1,
507 .rss_hf = ETH_RSS_IP |
513 const uint16_t rx_rings = 1, tx_rings = 1;
514 const uint16_t rx_ring_size = 512, tx_ring_size = 512;
515 struct rte_eth_conf port_conf = port_conf_default;
518 struct rte_eth_dev_info dev_info;
519 struct rte_eth_txconf txconf;
521 if (port >= rte_eth_dev_count())
524 rte_eth_dev_info_get(port, &dev_info);
525 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
526 port_conf.txmode.offloads |=
527 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
529 /* Configure the Ethernet device. */
530 retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
534 /* Allocate and set up 1 RX queue per Ethernet port. */
535 for (q = 0; q < rx_rings; q++) {
536 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
537 rte_eth_dev_socket_id(port), NULL, mbuf_pool);
542 txconf = dev_info.default_txconf;
543 txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
544 txconf.offloads = port_conf_default.txmode.offloads;
545 /* Allocate and set up 1 TX queue per Ethernet port. */
546 for (q = 0; q < tx_rings; q++) {
547 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
548 rte_eth_dev_socket_id(port), &txconf);
553 /* Start the Ethernet port. */
554 retval = rte_eth_dev_start(port);
558 /* Display the port MAC address. */
559 struct ether_addr addr;
560 rte_eth_macaddr_get(port, &addr);
561 printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
562 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
564 addr.addr_bytes[0], addr.addr_bytes[1],
565 addr.addr_bytes[2], addr.addr_bytes[3],
566 addr.addr_bytes[4], addr.addr_bytes[5]);
568 /* Enable RX in promiscuous mode for the Ethernet device. */
569 rte_eth_promiscuous_enable(port);
575 init_ports(unsigned int num_ports)
580 struct rte_mempool *mp = rte_pktmbuf_pool_create("packet_pool",
581 /* mbufs */ 16384 * num_ports,
582 /* cache_size */ 512,
584 /* data_room_size */ RTE_MBUF_DEFAULT_BUF_SIZE,
587 for (portid = 0; portid < num_ports; portid++)
588 if (port_init(portid, mp) != 0)
589 rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
592 for (i = 0; i < num_ports; i++) {
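/* One TX buffer of 32 mbufs per port; the error callback registered
 * below retries any mbufs the NIC did not accept.
 */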
593 void *userdata = (void *)(uintptr_t) i;
595 rte_malloc(NULL, RTE_ETH_TX_BUFFER_SIZE(32), 0);
596 if (fdata->tx_buf[i] == NULL)
597 rte_panic("Out of memory\n");
598 rte_eth_tx_buffer_init(fdata->tx_buf[i], 32);
599 rte_eth_tx_buffer_set_err_callback(fdata->tx_buf[i],
613 setup_eventdev(struct cons_data *cons_data,
614 struct worker_data *worker_data)
616 const uint8_t dev_id = 0;
617 /* the +1 is for a SINGLE_LINK TX stage */
618 const uint8_t nb_queues = cdata.num_stages + 1;
619 /* + 1 for consumer */
620 const uint8_t nb_ports = cdata.num_workers + 1;
621 struct rte_event_dev_config config = {
622 .nb_event_queues = nb_queues,
623 .nb_event_ports = nb_ports,
624 .nb_events_limit = 4096,
625 .nb_event_queue_flows = 1024,
626 .nb_event_port_dequeue_depth = 128,
627 .nb_event_port_enqueue_depth = 128,
629 struct rte_event_port_conf wkr_p_conf = {
630 .dequeue_depth = cdata.worker_cq_depth,
632 .new_event_threshold = 4096,
634 struct rte_event_queue_conf wkr_q_conf = {
635 .schedule_type = cdata.queue_type,
636 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
637 .nb_atomic_flows = 1024,
638 .nb_atomic_order_sequences = 1024,
640 struct rte_event_port_conf tx_p_conf = {
641 .dequeue_depth = 128,
642 .enqueue_depth = 128,
643 .new_event_threshold = 4096,
645 const struct rte_event_queue_conf tx_q_conf = {
646 .priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
647 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
650 struct port_link worker_queues[MAX_NUM_STAGES];
651 uint8_t disable_implicit_release;
652 struct port_link tx_queue;
655 int ret, ndev = rte_event_dev_count();
657 printf("%d: No Eventdev Devices Found\n", __LINE__);
661 struct rte_event_dev_info dev_info;
662 ret = rte_event_dev_info_get(dev_id, &dev_info);
663 printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);
665 disable_implicit_release = (dev_info.event_dev_cap &
666 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
668 wkr_p_conf.disable_implicit_release = disable_implicit_release;
669 tx_p_conf.disable_implicit_release = disable_implicit_release;
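/* Clamp the requested port dequeue/enqueue depths to the device's
 * advertised limits.
 */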
671 if (dev_info.max_event_port_dequeue_depth <
672 config.nb_event_port_dequeue_depth)
673 config.nb_event_port_dequeue_depth =
674 dev_info.max_event_port_dequeue_depth;
675 if (dev_info.max_event_port_enqueue_depth <
676 config.nb_event_port_enqueue_depth)
677 config.nb_event_port_enqueue_depth =
678 dev_info.max_event_port_enqueue_depth;
680 ret = rte_event_dev_configure(dev_id, &config);
682 printf("%d: Error configuring device\n", __LINE__);
686 /* Queue creation - one load-balanced queue per pipeline stage */
687 printf(" Stages:\n");
688 for (i = 0; i < cdata.num_stages; i++) {
689 if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
690 printf("%d: error creating qid %d\n", __LINE__, i);
694 cdata.next_qid[i] = i+1;
695 worker_queues[i].queue_id = i;
696 if (cdata.enable_queue_priorities) {
697 /* calculate priority stepping for each stage, leaving
698 * headroom of 1 for the SINGLE_LINK TX below
700 const uint32_t prio_delta =
701 (RTE_EVENT_DEV_PRIORITY_LOWEST-1) / nb_queues;
703 /* higher priority for queues closer to tx */
704 wkr_q_conf.priority =
705 RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta * i;
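/* Example, assuming the default 0..255 priority range: with 3 worker
 * stages (nb_queues = 4), prio_delta = 254 / 4 = 63, so stages 0..2
 * get priorities 255, 192 and 129 (a lower value is a higher priority).
 */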
708 const char *type_str = "Atomic";
709 switch (wkr_q_conf.schedule_type) {
710 case RTE_SCHED_TYPE_ORDERED:
711 type_str = "Ordered";
713 case RTE_SCHED_TYPE_PARALLEL:
714 type_str = "Parallel";
717 printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
718 wkr_q_conf.priority);
722 /* final queue for sending to TX core */
723 if (rte_event_queue_setup(dev_id, i, &tx_q_conf) < 0) {
724 printf("%d: error creating qid %d\n", __LINE__, i);
727 tx_queue.queue_id = i;
728 tx_queue.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
730 if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
731 wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
732 if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
733 wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
735 /* set up one port per worker, linking to all stage queues */
736 for (i = 0; i < cdata.num_workers; i++) {
737 struct worker_data *w = &worker_data[i];
739 if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
740 printf("Error setting up port %d\n", i);
745 for (s = 0; s < cdata.num_stages; s++) {
746 if (rte_event_port_link(dev_id, i,
747 &worker_queues[s].queue_id,
748 &worker_queues[s].priority,
750 printf("%d: error creating link for port %d\n",
758 if (tx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
759 tx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
760 if (tx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
761 tx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
763 /* port for consumer, linked to TX queue */
764 if (rte_event_port_setup(dev_id, i, &tx_p_conf) < 0) {
765 printf("Error setting up port %d\n", i);
768 if (rte_event_port_link(dev_id, i, &tx_queue.queue_id,
769 &tx_queue.priority, 1) != 1) {
770 printf("%d: error creating link for port %d\n",
774 *cons_data = (struct cons_data){.dev_id = dev_id,
776 .release = disable_implicit_release };
778 ret = rte_event_dev_service_id_get(dev_id,
779 &fdata->evdev_service_id);
780 if (ret != -ESRCH && ret != 0) {
781 printf("Error getting the service ID for sw eventdev\n");
784 rte_service_runstate_set(fdata->evdev_service_id, 1);
785 rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
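/* The eventdev's scheduling service (needed by e.g. event_sw) is driven
 * from schedule_devices() on the scheduler cores rather than from a
 * dedicated service lcore.
 */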
786 if (rte_event_dev_start(dev_id) < 0) {
787 printf("Error starting eventdev\n");
795 signal_handler(int signum)
798 rte_exit(1, "Exiting on signal %d\n", signum);
799 if (signum == SIGINT || signum == SIGTERM) {
800 printf("\n\nSignal %d received, preparing to exit...\n",
804 if (signum == SIGTSTP)
805 rte_event_dev_dump(0, stdout);
808 static inline uint64_t
809 port_stat(int dev_id, int32_t p)
812 snprintf(statname, sizeof(statname), "port_%u_rx", p);
813 return rte_event_dev_xstats_by_name_get(dev_id, statname, NULL);
817 main(int argc, char **argv)
819 struct worker_data *worker_data;
820 unsigned int num_ports;
824 signal(SIGINT, signal_handler);
825 signal(SIGTERM, signal_handler);
826 signal(SIGTSTP, signal_handler);
828 err = rte_eal_init(argc, argv);
830 rte_panic("Invalid EAL arguments\n");
835 fdata = rte_malloc(NULL, sizeof(struct fastpath_data), 0);
837 rte_panic("Out of memory\n");
839 /* Parse cli options */
840 parse_app_args(argc, argv);
842 num_ports = rte_eth_dev_count();
844 rte_panic("No ethernet ports found\n");
846 const unsigned int cores_needed = cdata.active_cores;
849 printf(" Config:\n");
850 printf("\tports: %u\n", num_ports);
851 printf("\tworkers: %u\n", cdata.num_workers);
852 printf("\tpackets: %"PRIi64"\n", cdata.num_packets);
853 printf("\tQueue-prio: %u\n", cdata.enable_queue_priorities);
854 if (cdata.queue_type == RTE_SCHED_TYPE_ORDERED)
855 printf("\tqid0 type: ordered\n");
856 if (cdata.queue_type == RTE_SCHED_TYPE_ATOMIC)
857 printf("\tqid0 type: atomic\n");
858 printf("\tCores available: %u\n", rte_lcore_count());
859 printf("\tCores used: %u\n", cores_needed);
862 if (rte_lcore_count() < cores_needed)
863 rte_panic("Too few cores (%d < %d)\n", rte_lcore_count(),
866 const unsigned int ndevs = rte_event_dev_count();
868 rte_panic("No dev_id devs found. Pasl in a --vdev eventdev.\n");
870 fprintf(stderr, "Warning: More than one eventdev, using idx 0");
872 worker_data = rte_calloc(0, cdata.num_workers,
873 sizeof(worker_data[0]), 0);
874 if (worker_data == NULL)
875 rte_panic("rte_calloc failed\n");
877 int dev_id = setup_eventdev(&cons_data, worker_data);
879 rte_exit(EXIT_FAILURE, "Error setting up eventdev\n");
881 init_ports(num_ports);
882 init_rx_adapter(num_ports);
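/* Launch worker() on every in-use slave lcore; inside its loop each
 * lcore also drives whatever Rx/TX/scheduler services were assigned
 * to it via schedule_devices().
 */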
885 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
886 if (lcore_id >= MAX_NUM_CORE)
889 if (!fdata->rx_core[lcore_id] &&
890 !fdata->worker_core[lcore_id] &&
891 !fdata->tx_core[lcore_id] &&
892 !fdata->sched_core[lcore_id])
895 if (fdata->rx_core[lcore_id])
897 "[%s()] lcore %d executing NIC Rx\n",
900 if (fdata->tx_core[lcore_id])
902 "[%s()] lcore %d executing NIC Tx, and using eventdev port %u\n",
903 __func__, lcore_id, cons_data.port_id);
905 if (fdata->sched_core[lcore_id])
906 printf("[%s()] lcore %d executing scheduler\n",
909 if (fdata->worker_core[lcore_id])
911 "[%s()] lcore %d executing worker, using eventdev port %u\n",
913 worker_data[worker_idx].port_id);
915 err = rte_eal_remote_launch(worker, &worker_data[worker_idx],
918 rte_panic("Failed to launch worker on core %d\n",
922 if (fdata->worker_core[lcore_id])
926 lcore_id = rte_lcore_id();
928 if (core_in_use(lcore_id))
929 worker(&worker_data[worker_idx++]);
931 rte_eal_mp_wait_lcore();
934 rte_event_dev_dump(dev_id, stdout);
936 if (!cdata.quiet && (port_stat(dev_id, worker_data[0].port_id) !=
937 (uint64_t)-ENOTSUP)) {
938 printf("\nPort Workload distribution:\n");
940 uint64_t tot_pkts = 0;
941 uint64_t pkts_per_wkr[RTE_MAX_LCORE] = {0};
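/* Sum each worker port's rx xstat to report its share of the total. */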
942 for (i = 0; i < cdata.num_workers; i++) {
944 port_stat(dev_id, worker_data[i].port_id);
945 tot_pkts += pkts_per_wkr[i];
947 for (i = 0; i < cdata.num_workers; i++) {
948 float pc = pkts_per_wkr[i] * 100 /
950 printf("worker %i :\t%.1f %% (%"PRIu64" pkts)\n",
951 i, pc, pkts_per_wkr[i]);