/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <signal.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_ring.h>
#include <rte_reorder.h>
#define RX_DESC_PER_QUEUE 1024
#define TX_DESC_PER_QUEUE 1024

#define MAX_PKTS_BURST 32
#define REORDER_BUFFER_SIZE 8192
#define MBUF_PER_POOL 65535
#define MBUF_POOL_CACHE_SIZE 250

#define RING_SIZE 16384
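
/*
 * Pipeline overview: the RX lcore tags incoming mbufs with a sequence
 * number and enqueues them on the rx_to_workers ring; worker lcores pull
 * bursts, touch each mbuf and pass it on via the workers_to_tx ring; the
 * TX lcore either restores arrival order with the rte_reorder library
 * (send_thread) or transmits as-is (tx_thread, --disable-reorder).
 */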
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_REORDERAPP RTE_LOGTYPE_USER1
unsigned int portmask;
unsigned int disable_reorder;
unsigned int insight_worker;
volatile uint8_t quit_signal;

static struct rte_mempool *mbuf_pool;

static struct rte_eth_conf port_conf_default;
struct worker_thread_args {
	struct rte_ring *ring_in;
	struct rte_ring *ring_out;
};

struct send_thread_args {
	struct rte_ring *ring_in;
	struct rte_reorder_buffer *buffer;
};
volatile struct app_stats {
	struct {
		uint64_t rx_pkts;
		uint64_t enqueue_pkts;
		uint64_t enqueue_failed_pkts;
	} rx __rte_cache_aligned;

	struct {
		uint64_t dequeue_pkts;
		uint64_t enqueue_pkts;
		uint64_t enqueue_failed_pkts;
	} wkr __rte_cache_aligned;

	struct {
		uint64_t dequeue_pkts;
		/* Too early pkts transmitted directly w/o reordering */
		uint64_t early_pkts_txtd_woro;
		/* Too early pkts failed from direct transmit */
		uint64_t early_pkts_tx_failed_woro;
		uint64_t ro_tx_pkts;
		uint64_t ro_tx_failed_pkts;
	} tx __rte_cache_aligned;
} app_stats;
/* per worker lcore stats */
struct wkr_stats_per {
	uint64_t deq_pkts;
	uint64_t enq_pkts;
	uint64_t enq_failed_pkts;
} __rte_cache_aligned;

static struct wkr_stats_per wkr_stats[RTE_MAX_LCORE] = { {0} };
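
/*
 * Each worker updates only its own wkr_stats[] slot, indexed by lcore ID;
 * __rte_cache_aligned keeps each per-lcore counter set on its own cache
 * line so concurrent updates do not false-share.
 */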
/**
 * Get the last enabled lcore ID
 *
 * @return
 *   The last enabled lcore ID.
 */
static unsigned int
get_last_lcore_id(void)
{
	int i;

	for (i = RTE_MAX_LCORE - 1; i >= 0; i--)
		if (rte_lcore_is_enabled(i))
			return i;
	return 0;
}
/**
 * Get the previous enabled lcore ID
 *
 * @param id
 *   The current lcore ID
 * @return
 *   The previous enabled lcore ID or the current lcore
 *   ID if it is the first available core.
 */
static unsigned int
get_previous_lcore_id(unsigned int id)
{
	int i;

	for (i = id - 1; i >= 0; i--)
		if (rte_lcore_is_enabled(i))
			return i;
	return id;
}
static inline void
pktmbuf_free_bulk(struct rte_mbuf *mbuf_table[], unsigned n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		rte_pktmbuf_free(mbuf_table[i]);
}
/* display usage */
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
			"  -p PORTMASK: hexadecimal bitmask of ports to configure\n",
			prgname);
}
static int
parse_portmask(const char *portmask)
{
	unsigned long pm;
	char *end = NULL;

	/* parse hexadecimal string; return 0 on a malformed string so the
	 * caller's "portmask == 0" check catches the error */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt;
	int option_index;
	char **argvopt = argv;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{"disable-reorder", 0, 0, 0},
		{"insight-worker", 0, 0, 0},
		{NULL, 0, 0, 0}
	};

	while ((opt = getopt_long(argc, argvopt, "p:",
					lgopts, &option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			portmask = parse_portmask(optarg);
			if (portmask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		/* long options */
		case 0:
			if (!strcmp(lgopts[option_index].name, "disable-reorder")) {
				printf("reorder disabled\n");
				disable_reorder = 1;
			}
			if (!strcmp(lgopts[option_index].name,
					"insight-worker")) {
				printf("print all worker statistics\n");
				insight_worker = 1;
			}
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}
	if (optind <= 1) {
		print_usage(prgname);
		return -1;
	}

	argv[optind - 1] = prgname;
	optind = 1; /* reset getopt lib */
	return 0;
}
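
/*
 * Example invocation (binary name and core/port numbers are illustrative,
 * not taken from this file):
 *
 *   ./packet_ordering -l 0-3 -- -p 0x3 --insight-worker
 *
 * With this layout the master lcore 0 runs RX, lcores 1-2 run workers,
 * the last lcore 3 runs TX, ports 0 and 1 are enabled, and per-worker
 * statistics are printed on exit.
 */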
/*
 * Tx buffer error callback
 */
static void
flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
		void *userdata __rte_unused) {

	/* free the mbufs which failed from transmit */
	app_stats.tx.ro_tx_failed_pkts += count;
	RTE_LOG_DP(DEBUG, REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
	pktmbuf_free_bulk(unsent, count);
}
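
/*
 * rte_eth_tx_buffer() and rte_eth_tx_buffer_flush() invoke this callback
 * for any packets the PMD could not accept, so every buffered packet is
 * either transmitted or accounted for and freed here.
 */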
static inline int
free_tx_buffers(struct rte_eth_dev_tx_buffer *tx_buffer[]) {
	uint16_t port_id;

	/* free the buffers of all enabled ports */
	RTE_ETH_FOREACH_DEV(port_id) {
		/* skip ports that are not enabled */
		if ((portmask & (1 << port_id)) == 0)
			continue;

		rte_free(tx_buffer[port_id]);
	}
	return 0;
}
static inline int
configure_tx_buffers(struct rte_eth_dev_tx_buffer *tx_buffer[])
{
	uint16_t port_id;
	int ret;

	/* initialize buffers for all ports */
	RTE_ETH_FOREACH_DEV(port_id) {
		/* skip ports that are not enabled */
		if ((portmask & (1 << port_id)) == 0)
			continue;

		/* Initialize TX buffers */
		tx_buffer[port_id] = rte_zmalloc_socket("tx_buffer",
				RTE_ETH_TX_BUFFER_SIZE(MAX_PKTS_BURST), 0,
				rte_eth_dev_socket_id(port_id));
		if (tx_buffer[port_id] == NULL)
			rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
					port_id);

		rte_eth_tx_buffer_init(tx_buffer[port_id], MAX_PKTS_BURST);

		ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
				flush_tx_error_callback, NULL);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot set error callback for tx buffer on port %u\n",
					port_id);
	}
	return 0;
}
static int
configure_eth_port(uint16_t port_id)
{
	struct rte_ether_addr addr;
	const uint16_t rxRings = 1, txRings = 1;
	int ret;
	uint16_t q;
	uint16_t nb_rxd = RX_DESC_PER_QUEUE;
	uint16_t nb_txd = TX_DESC_PER_QUEUE;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	struct rte_eth_conf port_conf = port_conf_default;

	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0) {
		printf("Error during getting device (port %u) info: %s\n",
				port_id, strerror(-ret));
		return ret;
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	/* pass the local copy carrying the offload flags, not the default */
	ret = rte_eth_dev_configure(port_id, rxRings, txRings, &port_conf);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	if (ret != 0)
		return ret;

	for (q = 0; q < rxRings; q++) {
		ret = rte_eth_rx_queue_setup(port_id, q, nb_rxd,
				rte_eth_dev_socket_id(port_id), NULL,
				mbuf_pool);
		if (ret < 0)
			return ret;
	}

	txconf = dev_info.default_txconf;
	txconf.offloads = port_conf.txmode.offloads;
	for (q = 0; q < txRings; q++) {
		ret = rte_eth_tx_queue_setup(port_id, q, nb_txd,
				rte_eth_dev_socket_id(port_id), &txconf);
		if (ret < 0)
			return ret;
	}

	ret = rte_eth_dev_start(port_id);
	if (ret < 0)
		return ret;

	rte_eth_macaddr_get(port_id, &addr);
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			port_id,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	rte_eth_promiscuous_enable(port_id);

	return 0;
}
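
/*
 * Note: DEV_TX_OFFLOAD_MBUF_FAST_FREE lets the PMD free transmitted mbufs
 * back to a single pool without reference-count checks; it is safe here
 * because every mbuf in this application comes from the one mbuf_pool.
 */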
static void
print_stats(void)
{
	uint16_t i;
	struct rte_eth_stats eth_stats;
	unsigned int lcore_id, last_lcore_id, master_lcore_id, end_w_lcore_id;

	last_lcore_id = get_last_lcore_id();
	master_lcore_id = rte_get_master_lcore();
	end_w_lcore_id = get_previous_lcore_id(last_lcore_id);

	printf("\nRX thread stats:\n");
	printf(" - Pkts rxd:                         %"PRIu64"\n",
			app_stats.rx.rx_pkts);
	printf(" - Pkts enqd to workers ring:        %"PRIu64"\n",
			app_stats.rx.enqueue_pkts);

	for (lcore_id = 0; lcore_id <= end_w_lcore_id; lcore_id++) {
		if (insight_worker
			&& rte_lcore_is_enabled(lcore_id)
			&& lcore_id != master_lcore_id) {
			printf("\nWorker thread stats on core [%u]:\n",
					lcore_id);
			printf(" - Pkts deqd from workers ring:      %"PRIu64"\n",
					wkr_stats[lcore_id].deq_pkts);
			printf(" - Pkts enqd to tx ring:             %"PRIu64"\n",
					wkr_stats[lcore_id].enq_pkts);
			printf(" - Pkts enq to tx failed:            %"PRIu64"\n",
					wkr_stats[lcore_id].enq_failed_pkts);
		}

		app_stats.wkr.dequeue_pkts += wkr_stats[lcore_id].deq_pkts;
		app_stats.wkr.enqueue_pkts += wkr_stats[lcore_id].enq_pkts;
		app_stats.wkr.enqueue_failed_pkts +=
				wkr_stats[lcore_id].enq_failed_pkts;
	}

	printf("\nWorker thread stats:\n");
	printf(" - Pkts deqd from workers ring:      %"PRIu64"\n",
			app_stats.wkr.dequeue_pkts);
	printf(" - Pkts enqd to tx ring:             %"PRIu64"\n",
			app_stats.wkr.enqueue_pkts);
	printf(" - Pkts enq to tx failed:            %"PRIu64"\n",
			app_stats.wkr.enqueue_failed_pkts);

	printf("\nTX stats:\n");
	printf(" - Pkts deqd from tx ring:           %"PRIu64"\n",
			app_stats.tx.dequeue_pkts);
	printf(" - Ro Pkts transmitted:              %"PRIu64"\n",
			app_stats.tx.ro_tx_pkts);
	printf(" - Ro Pkts tx failed:                %"PRIu64"\n",
			app_stats.tx.ro_tx_failed_pkts);
	printf(" - Pkts transmitted w/o reorder:     %"PRIu64"\n",
			app_stats.tx.early_pkts_txtd_woro);
	printf(" - Pkts tx failed w/o reorder:       %"PRIu64"\n",
			app_stats.tx.early_pkts_tx_failed_woro);

	RTE_ETH_FOREACH_DEV(i) {
		rte_eth_stats_get(i, &eth_stats);
		printf("\nPort %u stats:\n", i);
		printf(" - Pkts in:   %"PRIu64"\n", eth_stats.ipackets);
		printf(" - Pkts out:  %"PRIu64"\n", eth_stats.opackets);
		printf(" - In Errs:   %"PRIu64"\n", eth_stats.ierrors);
		printf(" - Out Errs:  %"PRIu64"\n", eth_stats.oerrors);
		printf(" - Mbuf Errs: %"PRIu64"\n", eth_stats.rx_nombuf);
	}
}
static void
int_handler(int sig_num)
{
	printf("Exiting on signal %d\n", sig_num);
	quit_signal = 1;
}
/**
 * This thread receives mbufs from the port and assigns them an internal
 * sequence number to keep track of their order of arrival.
 * The mbufs are then passed to the worker threads via the rx_to_workers
 * ring.
 */
static int
rx_thread(struct rte_ring *ring_out)
{
	uint32_t seqn = 0;
	uint16_t i, ret = 0;
	uint16_t nb_rx_pkts;
	uint16_t port_id;
	struct rte_mbuf *pkts[MAX_PKTS_BURST];

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
			rte_lcore_id());

	while (!quit_signal) {

		RTE_ETH_FOREACH_DEV(port_id) {
			if ((portmask & (1 << port_id)) != 0) {

				/* receive packets */
				nb_rx_pkts = rte_eth_rx_burst(port_id, 0,
						pkts, MAX_PKTS_BURST);
				if (nb_rx_pkts == 0) {
					RTE_LOG_DP(DEBUG, REORDERAPP,
						"%s():Received zero packets\n", __func__);
					continue;
				}
				app_stats.rx.rx_pkts += nb_rx_pkts;

				/* mark sequence number */
				for (i = 0; i < nb_rx_pkts; )
					pkts[i++]->seqn = seqn++;

				/* enqueue to rx_to_workers ring */
				ret = rte_ring_enqueue_burst(ring_out,
						(void *)pkts, nb_rx_pkts, NULL);
				app_stats.rx.enqueue_pkts += ret;
				if (unlikely(ret < nb_rx_pkts)) {
					app_stats.rx.enqueue_failed_pkts +=
							(nb_rx_pkts - ret);
					pktmbuf_free_bulk(&pkts[ret], nb_rx_pkts - ret);
				}
			}
		}
	}
	return 0;
}
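
/*
 * rx_thread() runs on the master lcore (see main()); the sequence numbers
 * stamped here are what rte_reorder_insert() later uses in send_thread()
 * to restore the arrival order.
 */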
/**
 * This thread takes bursts of packets from the rx_to_workers ring,
 * changes the input port value of each packet to an output port value,
 * and feeds the burst to the workers_to_tx ring.
 */
static int
worker_thread(void *args_ptr)
{
	const uint16_t nb_ports = rte_eth_dev_count_avail();
	uint16_t i, ret = 0;
	uint16_t burst_size = 0;
	struct worker_thread_args *args;
	struct rte_mbuf *burst_buffer[MAX_PKTS_BURST] = { NULL };
	struct rte_ring *ring_in, *ring_out;
	const unsigned xor_val = (nb_ports > 1);
	unsigned int core_id = rte_lcore_id();

	args = (struct worker_thread_args *) args_ptr;
	ring_in = args->ring_in;
	ring_out = args->ring_out;

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
			rte_lcore_id());

	while (!quit_signal) {

		/* dequeue the mbufs from rx_to_workers ring */
		burst_size = rte_ring_dequeue_burst(ring_in,
				(void *)burst_buffer, MAX_PKTS_BURST, NULL);
		if (unlikely(burst_size == 0))
			continue;

		wkr_stats[core_id].deq_pkts += burst_size;

		/* just do some operation on mbuf */
		for (i = 0; i < burst_size;)
			burst_buffer[i++]->port ^= xor_val;

		/* enqueue the modified mbufs to workers_to_tx ring */
		ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer,
				burst_size, NULL);
		wkr_stats[core_id].enq_pkts += ret;
		if (unlikely(ret < burst_size)) {
			/* Return the mbufs to their respective pool, dropping packets */
			wkr_stats[core_id].enq_failed_pkts += burst_size - ret;
			pktmbuf_free_bulk(&burst_buffer[ret], burst_size - ret);
		}
	}
	return 0;
}
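
/*
 * xor_val is 1 whenever more than one port is in use, so XOR-ing the port
 * field pairs adjacent ports (0 <-> 1, 2 <-> 3, ...); main() enforces an
 * even port count for exactly this reason. With a single port, xor_val is
 * 0 and packets are sent back out of the port they arrived on.
 */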
/**
 * Dequeue mbufs from the workers_to_tx ring and reorder them before
 * transmitting.
 */
static int
send_thread(struct send_thread_args *args)
{
	int ret;
	unsigned int i, dret;
	uint16_t nb_dq_mbufs;
	uint8_t outp;
	unsigned sent;
	struct rte_mbuf *mbufs[MAX_PKTS_BURST];
	struct rte_mbuf *rombufs[MAX_PKTS_BURST] = {NULL};
	static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__, rte_lcore_id());

	configure_tx_buffers(tx_buffer);

	while (!quit_signal) {

		/* dequeue the mbufs from workers_to_tx ring */
		nb_dq_mbufs = rte_ring_dequeue_burst(args->ring_in,
				(void *)mbufs, MAX_PKTS_BURST, NULL);

		if (unlikely(nb_dq_mbufs == 0))
			continue;

		app_stats.tx.dequeue_pkts += nb_dq_mbufs;

		for (i = 0; i < nb_dq_mbufs; i++) {
			/* send dequeued mbufs for reordering */
			ret = rte_reorder_insert(args->buffer, mbufs[i]);

			if (ret == -1 && rte_errno == ERANGE) {
				/* Too early pkts should be transmitted out directly */
				RTE_LOG_DP(DEBUG, REORDERAPP,
						"%s():Cannot reorder early packet "
						"direct enqueuing to TX\n", __func__);
				outp = mbufs[i]->port;
				if ((portmask & (1 << outp)) == 0) {
					rte_pktmbuf_free(mbufs[i]);
					continue;
				}
				if (rte_eth_tx_burst(outp, 0, &mbufs[i], 1) != 1) {
					rte_pktmbuf_free(mbufs[i]);
					app_stats.tx.early_pkts_tx_failed_woro++;
				} else
					app_stats.tx.early_pkts_txtd_woro++;
			} else if (ret == -1 && rte_errno == ENOSPC) {
				/*
				 * Early pkts just outside of window should be dropped
				 */
				rte_pktmbuf_free(mbufs[i]);
			}
		}

		/*
		 * drain MAX_PKTS_BURST of reordered
		 * mbufs for transmit
		 */
		dret = rte_reorder_drain(args->buffer, rombufs, MAX_PKTS_BURST);
		for (i = 0; i < dret; i++) {

			struct rte_eth_dev_tx_buffer *outbuf;
			uint8_t outp1;

			outp1 = rombufs[i]->port;
			/* skip ports that are not enabled */
			if ((portmask & (1 << outp1)) == 0) {
				rte_pktmbuf_free(rombufs[i]);
				continue;
			}

			outbuf = tx_buffer[outp1];
			sent = rte_eth_tx_buffer(outp1, 0, outbuf, rombufs[i]);
			if (sent)
				app_stats.tx.ro_tx_pkts += sent;
		}
	}

	free_tx_buffers(tx_buffer);

	return 0;
}
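
/*
 * rte_reorder_insert() failure modes as handled above: rte_errno == ERANGE
 * marks a packet too early for the current reorder window (sent out
 * directly), while ENOSPC marks one just outside the window that cannot be
 * accommodated (dropped). The window is sized by the REORDER_BUFFER_SIZE
 * value passed to rte_reorder_create() in main().
 */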
/**
 * Dequeue mbufs from the workers_to_tx ring and transmit them
 */
static int
tx_thread(struct rte_ring *ring_in)
{
	uint32_t i, dqnum;
	uint8_t outp;
	unsigned sent;
	struct rte_mbuf *mbufs[MAX_PKTS_BURST];
	struct rte_eth_dev_tx_buffer *outbuf;
	static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
			rte_lcore_id());

	configure_tx_buffers(tx_buffer);

	while (!quit_signal) {

		/* dequeue the mbufs from workers_to_tx ring */
		dqnum = rte_ring_dequeue_burst(ring_in,
				(void *)mbufs, MAX_PKTS_BURST, NULL);

		if (unlikely(dqnum == 0))
			continue;

		app_stats.tx.dequeue_pkts += dqnum;

		for (i = 0; i < dqnum; i++) {
			outp = mbufs[i]->port;
			/* skip ports that are not enabled */
			if ((portmask & (1 << outp)) == 0) {
				rte_pktmbuf_free(mbufs[i]);
				continue;
			}

			outbuf = tx_buffer[outp];
			sent = rte_eth_tx_buffer(outp, 0, outbuf, mbufs[i]);
			if (sent)
				app_stats.tx.ro_tx_pkts += sent;
		}
	}

	free_tx_buffers(tx_buffer);

	return 0;
}
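
/*
 * tx_thread() is the --disable-reorder fast path: identical to
 * send_thread() except that packets go straight from the ring to the TX
 * buffers without passing through the rte_reorder library.
 */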
int
main(int argc, char **argv)
{
	int ret;
	unsigned int nb_ports;
	unsigned int lcore_id, last_lcore_id, master_lcore_id;
	uint16_t port_id;
	uint16_t nb_ports_available;
	struct worker_thread_args worker_args = {NULL, NULL};
	struct send_thread_args send_args = {NULL, NULL};
	struct rte_ring *rx_to_workers;
	struct rte_ring *workers_to_tx;

	/* catch ctrl-c so we can print on exit */
	signal(SIGINT, int_handler);

	/* Initialize EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return -1;

	argc -= ret;
	argv += ret;

	/* Parse the application specific arguments */
	ret = parse_args(argc, argv);
	if (ret < 0)
		return -1;

	/* Check if we have enough cores */
	if (rte_lcore_count() < 3)
		rte_exit(EXIT_FAILURE, "Error: this application needs at "
				"least 3 logical cores to run:\n"
				"1 lcore for packet RX\n"
				"1 lcore for packet TX\n"
				"and at least 1 lcore for worker threads\n");

	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "Error: no ethernet ports detected\n");
	if (nb_ports != 1 && (nb_ports & 1))
		rte_exit(EXIT_FAILURE, "Error: number of ports must be even, except "
				"when using a single port\n");

	mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL,
			MBUF_POOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	nb_ports_available = nb_ports;

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(port_id) {
		/* skip ports that are not enabled */
		if ((portmask & (1 << port_id)) == 0) {
			printf("\nSkipping disabled port %d\n", port_id);
			nb_ports_available--;
			continue;
		}
		/* init port */
		printf("Initializing port %u... done\n", port_id);

		if (configure_eth_port(port_id) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize port %u\n",
					port_id);
	}

	if (!nb_ports_available) {
		rte_exit(EXIT_FAILURE,
			"All available ports are disabled. Please set portmask.\n");
	}

	/* Create rings for inter core communication */
	rx_to_workers = rte_ring_create("rx_to_workers", RING_SIZE, rte_socket_id(),
			RING_F_SP_ENQ);
	if (rx_to_workers == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	workers_to_tx = rte_ring_create("workers_to_tx", RING_SIZE, rte_socket_id(),
			RING_F_SC_DEQ);
	if (workers_to_tx == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	if (!disable_reorder) {
		send_args.buffer = rte_reorder_create("PKT_RO", rte_socket_id(),
				REORDER_BUFFER_SIZE);
		if (send_args.buffer == NULL)
			rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
	}

	last_lcore_id = get_last_lcore_id();
	master_lcore_id = rte_get_master_lcore();

	worker_args.ring_in = rx_to_workers;
	worker_args.ring_out = workers_to_tx;

	/* Start worker_thread() on all the available slave cores but the last 1 */
	for (lcore_id = 0; lcore_id <= get_previous_lcore_id(last_lcore_id); lcore_id++)
		if (rte_lcore_is_enabled(lcore_id) && lcore_id != master_lcore_id)
			rte_eal_remote_launch(worker_thread, (void *)&worker_args,
					lcore_id);

	if (disable_reorder) {
		/* Start tx_thread() on the last slave core */
		rte_eal_remote_launch((lcore_function_t *)tx_thread, workers_to_tx,
				last_lcore_id);
	} else {
		send_args.ring_in = workers_to_tx;
		/* Start send_thread() on the last slave core */
		rte_eal_remote_launch((lcore_function_t *)send_thread,
				(void *)&send_args, last_lcore_id);
	}

	/* Start rx_thread() on the master core */
	rx_thread(rx_to_workers);

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	print_stats();
	return 0;
}