/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <signal.h>
#include <getopt.h>

#include <rte_eal.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_ring.h>
#include <rte_reorder.h>

#define RX_DESC_PER_QUEUE 1024
#define TX_DESC_PER_QUEUE 1024

#define MAX_PKTS_BURST 32
#define REORDER_BUFFER_SIZE 8192
#define MBUF_PER_POOL 65535
#define MBUF_POOL_CACHE_SIZE 250

#define RING_SIZE 16384

/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_REORDERAPP RTE_LOGTYPE_USER1
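
/*
 * Pipeline overview (one lcore per stage):
 *
 *   rx_thread --> rx_to_workers ring --> worker_thread(s)
 *             --> workers_to_tx ring --> send_thread (reorders via
 *                 librte_reorder) or tx_thread (--disable-reorder)
 *
 * The RX lcore stamps each mbuf with a sequence number; the TX lcore uses
 * it to restore the original arrival order before transmitting.
 */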

unsigned int portmask;
unsigned int disable_reorder;
volatile uint8_t quit_signal;

static struct rte_mempool *mbuf_pool;

static struct rte_eth_conf port_conf_default = {
	.txmode = {
		.ignore_offload_bitfield = 1,
	},
};

struct worker_thread_args {
	struct rte_ring *ring_in;
	struct rte_ring *ring_out;
};

struct send_thread_args {
	struct rte_ring *ring_in;
	struct rte_reorder_buffer *buffer;
};
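
/*
 * Per-stage counters live in cache-aligned sub-structs so that the RX,
 * worker and TX lcores each update their own cache line and avoid false
 * sharing with the other stages.
 */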
volatile struct app_stats {
	struct {
		uint64_t rx_pkts;
		uint64_t enqueue_pkts;
		uint64_t enqueue_failed_pkts;
	} rx __rte_cache_aligned;

	struct {
		uint64_t dequeue_pkts;
		uint64_t enqueue_pkts;
		uint64_t enqueue_failed_pkts;
	} wkr __rte_cache_aligned;

	struct {
		uint64_t dequeue_pkts;
		/* Too early pkts transmitted directly w/o reordering */
		uint64_t early_pkts_txtd_woro;
		/* Too early pkts failed from direct transmit */
		uint64_t early_pkts_tx_failed_woro;
		uint64_t ro_tx_pkts;
		uint64_t ro_tx_failed_pkts;
	} tx __rte_cache_aligned;
} app_stats;

/**
 * Get the last enabled lcore ID
 *
 * @return
 *   The last enabled lcore ID.
 */
static unsigned int
get_last_lcore_id(void)
{
	int i;

	for (i = RTE_MAX_LCORE - 1; i >= 0; i--)
		if (rte_lcore_is_enabled(i))
			return i;
	return 0;
}

/**
 * Get the previous enabled lcore ID
 * @param id
 *   The current lcore ID
 * @return
 *   The previous enabled lcore ID or the current lcore
 *   ID if it is the first available core.
 */
static unsigned int
get_previous_lcore_id(unsigned int id)
{
	int i;

	for (i = id - 1; i >= 0; i--)
		if (rte_lcore_is_enabled(i))
			return i;
	return id;
}

static inline void
pktmbuf_free_bulk(struct rte_mbuf *mbuf_table[], unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		rte_pktmbuf_free(mbuf_table[i]);
}

/* display usage */
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
			"  -p PORTMASK: hexadecimal bitmask of ports to configure\n",
			prgname);
}

static int
parse_portmask(const char *portmask)
{
	unsigned long pm;
	char *end = NULL;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);

	/*
	 * Return 0 (an empty, invalid mask) on any parse error so that the
	 * caller's unsigned "portmask == 0" check catches it; returning -1
	 * would wrap to a huge unsigned value and slip through.
	 */
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}

/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt;
	int option_index;
	char **argvopt;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{"disable-reorder", 0, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:",
					lgopts, &option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			portmask = parse_portmask(optarg);
			if (portmask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;

		/* long options */
		case 0:
			if (!strcmp(lgopts[option_index].name, "disable-reorder")) {
				printf("reorder disabled\n");
				disable_reorder = 1;
			}
			break;

		default:
			print_usage(prgname);
			return -1;
		}
	}
	if (optind <= 1) {
		print_usage(prgname);
		return -1;
	}

	argv[optind - 1] = prgname;
	optind = 1; /* reset getopt lib */
	return 0;
}

/*
 * Tx buffer error callback
 */
static void
flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
		void *userdata __rte_unused)
{
	/* free the mbufs which failed from transmit */
	app_stats.tx.ro_tx_failed_pkts += count;
	RTE_LOG_DP(DEBUG, REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
	pktmbuf_free_bulk(unsent, count);
}

static inline int
free_tx_buffers(struct rte_eth_dev_tx_buffer *tx_buffer[])
{
	uint16_t port_id;

	/* free the buffers of all enabled ports */
	RTE_ETH_FOREACH_DEV(port_id) {
		/* skip ports that are not enabled */
		if ((portmask & (1 << port_id)) == 0)
			continue;

		rte_free(tx_buffer[port_id]);
	}
	return 0;
}

static inline int
configure_tx_buffers(struct rte_eth_dev_tx_buffer *tx_buffer[])
{
	uint16_t port_id;
	int ret;

	/* initialize buffers for all ports */
	RTE_ETH_FOREACH_DEV(port_id) {
		/* skip ports that are not enabled */
		if ((portmask & (1 << port_id)) == 0)
			continue;

		/* Initialize TX buffers */
		tx_buffer[port_id] = rte_zmalloc_socket("tx_buffer",
				RTE_ETH_TX_BUFFER_SIZE(MAX_PKTS_BURST), 0,
				rte_eth_dev_socket_id(port_id));
		if (tx_buffer[port_id] == NULL)
			rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
					port_id);

		rte_eth_tx_buffer_init(tx_buffer[port_id], MAX_PKTS_BURST);

		ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
				flush_tx_error_callback, NULL);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
					"Cannot set error callback for tx buffer on port %u\n",
					port_id);
	}
	return 0;
}

static inline int
configure_eth_port(uint16_t port_id)
{
	struct ether_addr addr;
	const uint16_t rxRings = 1, txRings = 1;
	int ret;
	uint16_t q;
	uint16_t nb_rxd = RX_DESC_PER_QUEUE;
	uint16_t nb_txd = TX_DESC_PER_QUEUE;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	struct rte_eth_conf port_conf = port_conf_default;

	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
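
	/*
	 * MBUF_FAST_FREE is safe here because every mbuf comes from the
	 * single mbuf_pool and the application never takes extra references,
	 * which is exactly what this offload requires.
	 */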

	/* configure with port_conf, which carries the offload flags set above */
	ret = rte_eth_dev_configure(port_id, rxRings, txRings, &port_conf);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	if (ret != 0)
		return ret;

	for (q = 0; q < rxRings; q++) {
		ret = rte_eth_rx_queue_setup(port_id, q, nb_rxd,
				rte_eth_dev_socket_id(port_id), NULL,
				mbuf_pool);
		if (ret < 0)
			return ret;
	}

	txconf = dev_info.default_txconf;
	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	txconf.offloads = port_conf.txmode.offloads;
	for (q = 0; q < txRings; q++) {
		ret = rte_eth_tx_queue_setup(port_id, q, nb_txd,
				rte_eth_dev_socket_id(port_id), &txconf);
		if (ret < 0)
			return ret;
	}

	ret = rte_eth_dev_start(port_id);
	if (ret < 0)
		return ret;

	rte_eth_macaddr_get(port_id, &addr);
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			port_id,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	rte_eth_promiscuous_enable(port_id);

	return 0;
}

static void
print_stats(void)
{
	uint16_t i;
	struct rte_eth_stats eth_stats;

	printf("\nRX thread stats:\n");
	printf(" - Pkts rxd: %"PRIu64"\n",
			app_stats.rx.rx_pkts);
	printf(" - Pkts enqd to workers ring: %"PRIu64"\n",
			app_stats.rx.enqueue_pkts);

	printf("\nWorker thread stats:\n");
	printf(" - Pkts deqd from workers ring: %"PRIu64"\n",
			app_stats.wkr.dequeue_pkts);
	printf(" - Pkts enqd to tx ring: %"PRIu64"\n",
			app_stats.wkr.enqueue_pkts);
	printf(" - Pkts enq to tx failed: %"PRIu64"\n",
			app_stats.wkr.enqueue_failed_pkts);

	printf("\nTX stats:\n");
	printf(" - Pkts deqd from tx ring: %"PRIu64"\n",
			app_stats.tx.dequeue_pkts);
	printf(" - Ro Pkts transmitted: %"PRIu64"\n",
			app_stats.tx.ro_tx_pkts);
	printf(" - Ro Pkts tx failed: %"PRIu64"\n",
			app_stats.tx.ro_tx_failed_pkts);
	printf(" - Pkts transmitted w/o reorder: %"PRIu64"\n",
			app_stats.tx.early_pkts_txtd_woro);
	printf(" - Pkts tx failed w/o reorder: %"PRIu64"\n",
			app_stats.tx.early_pkts_tx_failed_woro);

	RTE_ETH_FOREACH_DEV(i) {
		rte_eth_stats_get(i, &eth_stats);
		printf("\nPort %u stats:\n", i);
		printf(" - Pkts in: %"PRIu64"\n", eth_stats.ipackets);
		printf(" - Pkts out: %"PRIu64"\n", eth_stats.opackets);
		printf(" - In Errs: %"PRIu64"\n", eth_stats.ierrors);
		printf(" - Out Errs: %"PRIu64"\n", eth_stats.oerrors);
		printf(" - Mbuf Errs: %"PRIu64"\n", eth_stats.rx_nombuf);
	}
}

static void
int_handler(int sig_num)
{
	printf("Exiting on signal %d\n", sig_num);
	quit_signal = 1;
}

/**
 * This thread receives mbufs from the port and assigns them an internal
 * sequence number (mbuf->seqn) to keep track of their order of arrival.
 * The mbufs are then passed to the worker threads via the rx_to_workers
 * ring.
 */
static int
rx_thread(struct rte_ring *ring_out)
{
	uint32_t seqn = 0;
	uint16_t i, ret = 0;
	uint16_t nb_rx_pkts;
	uint16_t port_id;
	struct rte_mbuf *pkts[MAX_PKTS_BURST];

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
			rte_lcore_id());

	while (!quit_signal) {

		RTE_ETH_FOREACH_DEV(port_id) {
			if ((portmask & (1 << port_id)) != 0) {

				/* receive packets */
				nb_rx_pkts = rte_eth_rx_burst(port_id, 0,
						pkts, MAX_PKTS_BURST);
				if (nb_rx_pkts == 0) {
					RTE_LOG_DP(DEBUG, REORDERAPP,
						"%s():Received zero packets\n", __func__);
					continue;
				}
				app_stats.rx.rx_pkts += nb_rx_pkts;

				/* mark sequence number */
				for (i = 0; i < nb_rx_pkts; )
					pkts[i++]->seqn = seqn++;
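
				/*
				 * mbuf->seqn is the key rte_reorder_insert()
				 * sorts on later; a single global counter is
				 * safe because only this one RX lcore
				 * assigns sequence numbers.
				 */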

				/* enqueue to rx_to_workers ring */
				ret = rte_ring_enqueue_burst(ring_out,
						(void *)pkts, nb_rx_pkts, NULL);
				app_stats.rx.enqueue_pkts += ret;
				if (unlikely(ret < nb_rx_pkts)) {
					app_stats.rx.enqueue_failed_pkts +=
							(nb_rx_pkts - ret);
					pktmbuf_free_bulk(&pkts[ret], nb_rx_pkts - ret);
				}
			}
		}
	}
	return 0;
}

/**
 * This thread takes bursts of packets from the rx_to_workers ring,
 * changes each packet's input port value to its output port value and
 * feeds the burst to the workers_to_tx ring.
 */
static int
worker_thread(void *args_ptr)
{
	const uint16_t nb_ports = rte_eth_dev_count_avail();
	uint16_t i, ret = 0;
	uint16_t burst_size = 0;
	struct worker_thread_args *args;
	struct rte_mbuf *burst_buffer[MAX_PKTS_BURST] = { NULL };
	struct rte_ring *ring_in, *ring_out;
	const unsigned int xor_val = (nb_ports > 1);
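
	/*
	 * With more than one port, XOR-ing the port number with 1 swaps the
	 * members of each pair (0<->1, 2<->3, ...), which is why main()
	 * requires an even port count; with a single port xor_val is 0 and
	 * packets leave on the port they arrived on.
	 */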

	args = (struct worker_thread_args *) args_ptr;
	ring_in = args->ring_in;
	ring_out = args->ring_out;

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
			rte_lcore_id());

	while (!quit_signal) {

		/* dequeue the mbufs from rx_to_workers ring */
		burst_size = rte_ring_dequeue_burst(ring_in,
				(void *)burst_buffer, MAX_PKTS_BURST, NULL);
		if (unlikely(burst_size == 0))
			continue;

		__sync_fetch_and_add(&app_stats.wkr.dequeue_pkts, burst_size);

		/* just do some operation on mbuf */
		for (i = 0; i < burst_size;)
			burst_buffer[i++]->port ^= xor_val;

		/* enqueue the modified mbufs to workers_to_tx ring */
		ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer,
				burst_size, NULL);
		__sync_fetch_and_add(&app_stats.wkr.enqueue_pkts, ret);
		if (unlikely(ret < burst_size)) {
			/* Return the mbufs to their respective pool, dropping packets */
			__sync_fetch_and_add(&app_stats.wkr.enqueue_failed_pkts,
					(int)burst_size - ret);
			pktmbuf_free_bulk(&burst_buffer[ret], burst_size - ret);
		}
	}
	return 0;
}
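
/*
 * worker_thread() may run on several lcores at once, so its counters are
 * updated with __sync_fetch_and_add(); the RX and TX stages each run on a
 * single lcore and can use plain additions.
 */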

/**
 * Dequeue mbufs from the workers_to_tx ring and reorder them before
 * transmitting.
 */
static int
send_thread(struct send_thread_args *args)
{
	int ret;
	unsigned int i, dret;
	uint16_t nb_dq_mbufs;
	uint16_t outp;
	unsigned int sent;
	struct rte_mbuf *mbufs[MAX_PKTS_BURST];
	struct rte_mbuf *rombufs[MAX_PKTS_BURST] = {NULL};
	static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
			rte_lcore_id());

	configure_tx_buffers(tx_buffer);

	while (!quit_signal) {

		/* dequeue the mbufs from workers_to_tx ring */
		nb_dq_mbufs = rte_ring_dequeue_burst(args->ring_in,
				(void *)mbufs, MAX_PKTS_BURST, NULL);

		if (unlikely(nb_dq_mbufs == 0))
			continue;

		app_stats.tx.dequeue_pkts += nb_dq_mbufs;

		for (i = 0; i < nb_dq_mbufs; i++) {
			/* send dequeued mbufs for reordering */
			ret = rte_reorder_insert(args->buffer, mbufs[i]);

			if (ret == -1 && rte_errno == ERANGE) {
				/* Too early pkts should be transmitted out directly */
				RTE_LOG_DP(DEBUG, REORDERAPP,
						"%s():Cannot reorder early packet "
						"direct enqueuing to TX\n", __func__);
				outp = mbufs[i]->port;
				if ((portmask & (1 << outp)) == 0) {
					rte_pktmbuf_free(mbufs[i]);
					continue;
				}
				if (rte_eth_tx_burst(outp, 0, &mbufs[i], 1) != 1) {
					rte_pktmbuf_free(mbufs[i]);
					app_stats.tx.early_pkts_tx_failed_woro++;
				} else
					app_stats.tx.early_pkts_txtd_woro++;
			} else if (ret == -1 && rte_errno == ENOSPC) {
				/*
				 * Early pkts just outside of window should be dropped
				 */
				rte_pktmbuf_free(mbufs[i]);
			}
		}
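
		/*
		 * rte_reorder_insert() fails with rte_errno == ERANGE when the
		 * sequence number is far outside the reorder window (the
		 * packet is sent as-is above) and with ENOSPC when it is just
		 * outside and cannot be accommodated (the packet is dropped);
		 * everything else now sits in the buffer awaiting drain.
		 */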

		/*
		 * drain MAX_PKTS_BURST of reordered
		 * mbufs for transmit
		 */
		dret = rte_reorder_drain(args->buffer, rombufs, MAX_PKTS_BURST);
		for (i = 0; i < dret; i++) {

			struct rte_eth_dev_tx_buffer *outbuf;
			uint16_t outp1;

			outp1 = rombufs[i]->port;
			/* skip ports that are not enabled */
			if ((portmask & (1 << outp1)) == 0) {
				rte_pktmbuf_free(rombufs[i]);
				continue;
			}

			outbuf = tx_buffer[outp1];
			sent = rte_eth_tx_buffer(outp1, 0, outbuf, rombufs[i]);
			if (sent)
				app_stats.tx.ro_tx_pkts += sent;
		}
	}

	free_tx_buffers(tx_buffer);

	return 0;
}

/**
 * Dequeue mbufs from the workers_to_tx ring and transmit them
 */
static int
tx_thread(struct rte_ring *ring_in)
{
	uint32_t i, dqnum = 0;
	uint16_t outp;
	unsigned int sent;
	struct rte_mbuf *mbufs[MAX_PKTS_BURST];
	struct rte_eth_dev_tx_buffer *outbuf;
	static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
			rte_lcore_id());

	configure_tx_buffers(tx_buffer);

	while (!quit_signal) {

		/* dequeue the mbufs from workers_to_tx ring */
		dqnum = rte_ring_dequeue_burst(ring_in,
				(void *)mbufs, MAX_PKTS_BURST, NULL);

		if (unlikely(dqnum == 0))
			continue;

		app_stats.tx.dequeue_pkts += dqnum;

		for (i = 0; i < dqnum; i++) {
			outp = mbufs[i]->port;
			/* skip ports that are not enabled */
			if ((portmask & (1 << outp)) == 0) {
				rte_pktmbuf_free(mbufs[i]);
				continue;
			}

			outbuf = tx_buffer[outp];
			sent = rte_eth_tx_buffer(outp, 0, outbuf, mbufs[i]);
			if (sent)
				app_stats.tx.ro_tx_pkts += sent;
		}
	}

	free_tx_buffers(tx_buffer);

	return 0;
}

int
main(int argc, char **argv)
{
	int ret;
	unsigned int nb_ports;
	unsigned int lcore_id, last_lcore_id, master_lcore_id;
	uint16_t port_id;
	uint16_t nb_ports_available;
	struct worker_thread_args worker_args = {NULL, NULL};
	struct send_thread_args send_args = {NULL, NULL};
	struct rte_ring *rx_to_workers;
	struct rte_ring *workers_to_tx;

	/* catch ctrl-c so we can print on exit */
	signal(SIGINT, int_handler);

	/* Initialize EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return -1;

	argc -= ret;
	argv += ret;

	/* Parse the application specific arguments */
	ret = parse_args(argc, argv);
	if (ret < 0)
		return -1;

	/* Check if we have enough cores */
	if (rte_lcore_count() < 3)
		rte_exit(EXIT_FAILURE, "Error, This application needs at "
				"least 3 logical cores to run:\n"
				"1 lcore for packet RX\n"
				"1 lcore for packet TX\n"
				"and at least 1 lcore for worker threads\n");

	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "Error: no ethernet ports detected\n");
	if (nb_ports != 1 && (nb_ports & 1))
		rte_exit(EXIT_FAILURE, "Error: number of ports must be even, except "
				"when using a single port\n");

	mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL,
			MBUF_POOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	nb_ports_available = nb_ports;

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(port_id) {
		/* skip ports that are not enabled */
		if ((portmask & (1 << port_id)) == 0) {
			printf("\nSkipping disabled port %d\n", port_id);
			nb_ports_available--;
			continue;
		}

		/* init port */
		printf("Initializing port %u... done\n", port_id);

		if (configure_eth_port(port_id) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize port %u\n",
					port_id);
	}

	if (!nb_ports_available) {
		rte_exit(EXIT_FAILURE,
			"All available ports are disabled. Please set portmask.\n");
	}

	/* Create rings for inter core communication */
	rx_to_workers = rte_ring_create("rx_to_workers", RING_SIZE, rte_socket_id(),
			RING_F_SP_ENQ);
	if (rx_to_workers == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	workers_to_tx = rte_ring_create("workers_to_tx", RING_SIZE, rte_socket_id(),
			RING_F_SC_DEQ);
	if (workers_to_tx == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
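
	/*
	 * rx_to_workers is single-producer (only the RX lcore enqueues) but
	 * multi-consumer; workers_to_tx is the mirror image: many workers
	 * enqueue, a single TX lcore dequeues.
	 */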

	if (!disable_reorder) {
		send_args.buffer = rte_reorder_create("PKT_RO", rte_socket_id(),
				REORDER_BUFFER_SIZE);
		if (send_args.buffer == NULL)
			rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
	}
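
	/*
	 * The reorder buffer tracks a sliding window of roughly
	 * REORDER_BUFFER_SIZE sequence numbers; packets falling outside that
	 * window trigger the ERANGE/ENOSPC handling in send_thread().
	 */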

	last_lcore_id = get_last_lcore_id();
	master_lcore_id = rte_get_master_lcore();

	worker_args.ring_in = rx_to_workers;
	worker_args.ring_out = workers_to_tx;

	/* Start worker_thread() on all the available slave cores but the last 1 */
	for (lcore_id = 0; lcore_id <= get_previous_lcore_id(last_lcore_id); lcore_id++)
		if (rte_lcore_is_enabled(lcore_id) && lcore_id != master_lcore_id)
			rte_eal_remote_launch(worker_thread, (void *)&worker_args,
					lcore_id);

	if (disable_reorder) {
		/* Start tx_thread() on the last slave core */
		rte_eal_remote_launch((lcore_function_t *)tx_thread, workers_to_tx,
				last_lcore_id);
	} else {
		send_args.ring_in = workers_to_tx;
		/* Start send_thread() on the last slave core */
		rte_eal_remote_launch((lcore_function_t *)send_thread,
				(void *)&send_args, last_lcore_id);
	}

	/* Start rx_thread() on the master core */
	rx_thread(rx_to_workers);

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	print_stats();
	return 0;
}