/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <signal.h>
#include <getopt.h>

#include <rte_eal.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_ring.h>
#include <rte_reorder.h>
#define RX_DESC_PER_QUEUE 128
#define TX_DESC_PER_QUEUE 512

#define MAX_PKTS_BURST 32
#define REORDER_BUFFER_SIZE 8192
#define MBUF_PER_POOL 65535
#define MBUF_POOL_CACHE_SIZE 250

#define RING_SIZE 16384
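/*
 * Note: rte_ring sizes must be a power of two, and a mempool is most
 * memory-efficient when its element count is a power of two minus one,
 * hence RING_SIZE = 2^14 and MBUF_PER_POOL = 2^16 - 1 above.
 */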
/* uncomment below line to enable debug logs */
/* #define DEBUG */

#ifdef DEBUG
#define LOG_LEVEL RTE_LOG_DEBUG
#define LOG_DEBUG(log_type, fmt, args...) RTE_LOG(DEBUG, log_type, fmt, ##args)
#else
#define LOG_LEVEL RTE_LOG_INFO
#define LOG_DEBUG(log_type, fmt, args...) do {} while (0)
#endif
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_REORDERAPP RTE_LOGTYPE_USER1

unsigned int portmask;
unsigned int disable_reorder;
volatile uint8_t quit_signal;

static struct rte_mempool *mbuf_pool;

static struct rte_eth_conf port_conf_default;
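/*
 * port_conf_default is deliberately left zero-initialized: passing an
 * all-zero rte_eth_conf to rte_eth_dev_configure() selects the PMD's
 * default RX/TX settings.
 */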
struct worker_thread_args {
	struct rte_ring *ring_in;
	struct rte_ring *ring_out;
};

struct send_thread_args {
	struct rte_ring *ring_in;
	struct rte_reorder_buffer *buffer;
};
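
/*
 * Per-stage statistics. Each stage's counters are cache aligned so the
 * RX, worker and TX cores update them in separate cache lines, avoiding
 * false sharing.
 */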
volatile struct app_stats {
	struct {
		uint64_t rx_pkts;
		uint64_t enqueue_pkts;
		uint64_t enqueue_failed_pkts;
	} rx __rte_cache_aligned;

	struct {
		uint64_t dequeue_pkts;
		uint64_t enqueue_pkts;
		uint64_t enqueue_failed_pkts;
	} wkr __rte_cache_aligned;

	struct {
		uint64_t dequeue_pkts;
		/* Too early pkts transmitted directly w/o reordering */
		uint64_t early_pkts_txtd_woro;
		/* Too early pkts failed from direct transmit */
		uint64_t early_pkts_tx_failed_woro;
		uint64_t ro_tx_pkts;
		uint64_t ro_tx_failed_pkts;
	} tx __rte_cache_aligned;
} app_stats;
/*
 * Get the last enabled lcore ID
 *
 * @return
 *   The last enabled lcore ID.
 */
static unsigned int
get_last_lcore_id(void)
{
	int i;

	for (i = RTE_MAX_LCORE - 1; i >= 0; i--)
		if (rte_lcore_is_enabled(i))
			return i;
	return 0;
}
/*
 * Get the previous enabled lcore ID
 *
 * @param id
 *   The current lcore ID
 * @return
 *   The previous enabled lcore ID or the current lcore
 *   ID if it is the first available core.
 */
static unsigned int
get_previous_lcore_id(unsigned int id)
{
	int i;	/* signed, so the countdown below can terminate */

	for (i = id - 1; i >= 0; i--)
		if (rte_lcore_is_enabled(i))
			return i;
	return id;
}
static inline void
pktmbuf_free_bulk(struct rte_mbuf *mbuf_table[], unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		rte_pktmbuf_free(mbuf_table[i]);
}
/* display usage */
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
			"  -p PORTMASK: hexadecimal bitmask of ports to configure\n",
			prgname);
}
static int
parse_portmask(const char *portmask)
{
	unsigned long pm;
	char *end = NULL;

	/* parse hexadecimal string; a zero mask is treated as invalid */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt;
	int option_index;
	char **argvopt = argv;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{"disable-reorder", 0, 0, 0},
		{NULL, 0, 0, 0}
	};

	while ((opt = getopt_long(argc, argvopt, "p:",
					lgopts, &option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			portmask = parse_portmask(optarg);
			if (portmask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		/* long options */
		case 0:
			if (!strcmp(lgopts[option_index].name, "disable-reorder")) {
				printf("reorder disabled\n");
				disable_reorder = 1;
			}
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind <= 1) {
		print_usage(prgname);
		return -1;
	}

	argv[optind-1] = prgname;
	optind = 0; /* reset getopt lib */
	return 0;
}
/**
 * Tx buffer error callback; invoked by rte_eth_tx_buffer() and
 * rte_eth_tx_buffer_flush() with the packets that could not be sent.
 */
static void
flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
		void *userdata __rte_unused) {

	/* free the mbufs which failed from transmit */
	app_stats.tx.ro_tx_failed_pkts += count;
	LOG_DEBUG(REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
	pktmbuf_free_bulk(unsent, count);
}
static inline int
free_tx_buffers(struct rte_eth_dev_tx_buffer *tx_buffer[]) {
	const uint8_t nb_ports = rte_eth_dev_count();
	unsigned int port_id;

	/* free the TX buffers of all enabled ports */
	for (port_id = 0; port_id < nb_ports; port_id++) {
		/* skip ports that are not enabled */
		if ((portmask & (1 << port_id)) == 0)
			continue;

		rte_free(tx_buffer[port_id]);
	}
	return 0;
}
static inline int
configure_tx_buffers(struct rte_eth_dev_tx_buffer *tx_buffer[])
{
	const uint8_t nb_ports = rte_eth_dev_count();
	unsigned int port_id;
	int ret;

	/* initialize buffers for all ports */
	for (port_id = 0; port_id < nb_ports; port_id++) {
		/* skip ports that are not enabled */
		if ((portmask & (1 << port_id)) == 0)
			continue;

		/* Initialize TX buffers */
		tx_buffer[port_id] = rte_zmalloc_socket("tx_buffer",
				RTE_ETH_TX_BUFFER_SIZE(MAX_PKTS_BURST), 0,
				rte_eth_dev_socket_id(port_id));
		if (tx_buffer[port_id] == NULL)
			rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
					(unsigned int) port_id);

		rte_eth_tx_buffer_init(tx_buffer[port_id], MAX_PKTS_BURST);

		ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
				flush_tx_error_callback, NULL);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot set error callback for "
					"tx buffer on port %u\n", (unsigned int) port_id);
	}
	return 0;
}
static inline int
configure_eth_port(uint8_t port_id)
{
	struct ether_addr addr;
	const uint16_t rxRings = 1, txRings = 1;
	const uint8_t nb_ports = rte_eth_dev_count();
	int ret;
	uint16_t q;

	if (port_id >= nb_ports)	/* valid IDs are 0 .. nb_ports - 1 */
		return -1;

	ret = rte_eth_dev_configure(port_id, rxRings, txRings, &port_conf_default);
	if (ret != 0)
		return ret;

	for (q = 0; q < rxRings; q++) {
		ret = rte_eth_rx_queue_setup(port_id, q, RX_DESC_PER_QUEUE,
				rte_eth_dev_socket_id(port_id), NULL,
				mbuf_pool);
		if (ret < 0)
			return ret;
	}

	for (q = 0; q < txRings; q++) {
		ret = rte_eth_tx_queue_setup(port_id, q, TX_DESC_PER_QUEUE,
				rte_eth_dev_socket_id(port_id), NULL);
		if (ret < 0)
			return ret;
	}

	ret = rte_eth_dev_start(port_id);
	if (ret < 0)
		return ret;

	rte_eth_macaddr_get(port_id, &addr);
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned int)port_id,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	rte_eth_promiscuous_enable(port_id);

	return 0;
}
static void
print_stats(void)
{
	const uint8_t nb_ports = rte_eth_dev_count();
	unsigned int i;
	struct rte_eth_stats eth_stats;

	printf("\nRX thread stats:\n");
	printf(" - Pkts rxd:                         %"PRIu64"\n",
			app_stats.rx.rx_pkts);
	printf(" - Pkts enqd to workers ring:        %"PRIu64"\n",
			app_stats.rx.enqueue_pkts);

	printf("\nWorker thread stats:\n");
	printf(" - Pkts deqd from workers ring:      %"PRIu64"\n",
			app_stats.wkr.dequeue_pkts);
	printf(" - Pkts enqd to tx ring:             %"PRIu64"\n",
			app_stats.wkr.enqueue_pkts);
	printf(" - Pkts enq to tx failed:            %"PRIu64"\n",
			app_stats.wkr.enqueue_failed_pkts);

	printf("\nTX stats:\n");
	printf(" - Pkts deqd from tx ring:           %"PRIu64"\n",
			app_stats.tx.dequeue_pkts);
	printf(" - Ro Pkts transmitted:              %"PRIu64"\n",
			app_stats.tx.ro_tx_pkts);
	printf(" - Ro Pkts tx failed:                %"PRIu64"\n",
			app_stats.tx.ro_tx_failed_pkts);
	printf(" - Pkts transmitted w/o reorder:     %"PRIu64"\n",
			app_stats.tx.early_pkts_txtd_woro);
	printf(" - Pkts tx failed w/o reorder:       %"PRIu64"\n",
			app_stats.tx.early_pkts_tx_failed_woro);

	for (i = 0; i < nb_ports; i++) {
		rte_eth_stats_get(i, &eth_stats);
		printf("\nPort %u stats:\n", i);
		printf(" - Pkts in:   %"PRIu64"\n", eth_stats.ipackets);
		printf(" - Pkts out:  %"PRIu64"\n", eth_stats.opackets);
		printf(" - In Errs:   %"PRIu64"\n", eth_stats.ierrors);
		printf(" - Out Errs:  %"PRIu64"\n", eth_stats.oerrors);
		printf(" - Mbuf Errs: %"PRIu64"\n", eth_stats.rx_nombuf);
	}
}
static void
int_handler(int sig_num)
{
	printf("Exiting on signal %d\n", sig_num);
	quit_signal = 1;
}
/**
 * This thread receives mbufs from the ports and assigns each one an
 * internal sequence number to keep track of its order of arrival.
 *
 * The mbufs are then passed to the worker threads via the rx_to_workers
 * ring.
 */
static int
rx_thread(struct rte_ring *ring_out)
{
	const uint8_t nb_ports = rte_eth_dev_count();
	uint32_t seqn = 0;
	uint16_t i, ret = 0;
	uint16_t nb_rx_pkts;
	uint8_t port_id;
	struct rte_mbuf *pkts[MAX_PKTS_BURST];

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
							rte_lcore_id());

	while (!quit_signal) {

		for (port_id = 0; port_id < nb_ports; port_id++) {
			if ((portmask & (1 << port_id)) != 0) {

				/* receive packets */
				nb_rx_pkts = rte_eth_rx_burst(port_id, 0,
								pkts, MAX_PKTS_BURST);
				if (nb_rx_pkts == 0) {
					LOG_DEBUG(REORDERAPP,
					"%s():Received zero packets\n", __func__);
					continue;
				}
				app_stats.rx.rx_pkts += nb_rx_pkts;

				/* mark sequence number */
				for (i = 0; i < nb_rx_pkts;)
					pkts[i++]->seqn = seqn++;
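
				/*
				 * The seqn assigned above is the key that
				 * rte_reorder_insert() in send_thread() uses
				 * to restore the original arrival order.
				 */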
				/* enqueue to rx_to_workers ring */
				ret = rte_ring_enqueue_burst(ring_out, (void *)pkts,
								nb_rx_pkts);
				app_stats.rx.enqueue_pkts += ret;
				if (unlikely(ret < nb_rx_pkts)) {
					app_stats.rx.enqueue_failed_pkts +=
							(nb_rx_pkts - ret);
					pktmbuf_free_bulk(&pkts[ret], nb_rx_pkts - ret);
				}
			}
		}
	}
	return 0;
}
/**
 * This thread takes bursts of packets from the rx_to_workers ring,
 * changes each packet's input port value to its output port value and
 * feeds the packets to the workers_to_tx ring.
 */
static int
worker_thread(void *args_ptr)
{
	const uint8_t nb_ports = rte_eth_dev_count();
	uint16_t i, ret = 0;
	uint16_t burst_size = 0;
	struct worker_thread_args *args;
	struct rte_mbuf *burst_buffer[MAX_PKTS_BURST] = { NULL };
	struct rte_ring *ring_in, *ring_out;
	const unsigned int xor_val = (nb_ports > 1);
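
	/*
	 * With more than one port, XORing the port number with 1 pairs the
	 * ports (0<->1, 2<->3, ...), so each packet leaves on the peer of
	 * the port it arrived on; with a single port, xor_val is 0 and the
	 * port is unchanged.
	 */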
	args = (struct worker_thread_args *) args_ptr;
	ring_in = args->ring_in;
	ring_out = args->ring_out;

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
							rte_lcore_id());

	while (!quit_signal) {

		/* dequeue the mbufs from rx_to_workers ring */
		burst_size = rte_ring_dequeue_burst(ring_in,
				(void *)burst_buffer, MAX_PKTS_BURST);
		if (unlikely(burst_size == 0))
			continue;

		__sync_fetch_and_add(&app_stats.wkr.dequeue_pkts, burst_size);

		/* just do some operation on mbuf */
		for (i = 0; i < burst_size;)
			burst_buffer[i++]->port ^= xor_val;

		/* enqueue the modified mbufs to workers_to_tx ring */
		ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer,
				burst_size);
		__sync_fetch_and_add(&app_stats.wkr.enqueue_pkts, ret);
		if (unlikely(ret < burst_size)) {
			/* Return the mbufs to their respective pool, dropping packets */
			__sync_fetch_and_add(&app_stats.wkr.enqueue_failed_pkts,
					(int)burst_size - ret);
			pktmbuf_free_bulk(&burst_buffer[ret], burst_size - ret);
		}
	}
	return 0;
}
/**
 * Dequeue mbufs from the workers_to_tx ring and reorder them before
 * transmitting.
 */
static int
send_thread(struct send_thread_args *args)
{
	int ret;
	unsigned int i, dret;
	uint16_t nb_dq_mbufs;
	uint8_t outp;
	unsigned int sent;
	struct rte_mbuf *mbufs[MAX_PKTS_BURST];
	struct rte_mbuf *rombufs[MAX_PKTS_BURST] = {NULL};
	static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__, rte_lcore_id());

	configure_tx_buffers(tx_buffer);

	while (!quit_signal) {

		/* dequeue the mbufs from workers_to_tx ring */
		nb_dq_mbufs = rte_ring_dequeue_burst(args->ring_in,
				(void *)mbufs, MAX_PKTS_BURST);

		if (unlikely(nb_dq_mbufs == 0))
			continue;

		app_stats.tx.dequeue_pkts += nb_dq_mbufs;
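
		/*
		 * rte_reorder_insert() buffers each mbuf according to its
		 * seqn. rte_errno == ERANGE flags an "early" packet whose
		 * sequence number falls outside the current reorder window:
		 * it is transmitted directly below. rte_errno == ENOSPC
		 * flags a packet that cannot fit in the reorder buffer at
		 * all: it is dropped.
		 */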
		for (i = 0; i < nb_dq_mbufs; i++) {
			/* send dequeued mbufs for reordering */
			ret = rte_reorder_insert(args->buffer, mbufs[i]);

			if (ret == -1 && rte_errno == ERANGE) {
				/* Too early pkts should be transmitted out directly */
				LOG_DEBUG(REORDERAPP, "%s():Cannot reorder early packet "
						"direct enqueuing to TX\n", __func__);
				outp = mbufs[i]->port;
				if ((portmask & (1 << outp)) == 0) {
					rte_pktmbuf_free(mbufs[i]);
					continue;
				}
				if (rte_eth_tx_burst(outp, 0, &mbufs[i], 1) != 1) {
					rte_pktmbuf_free(mbufs[i]);
					app_stats.tx.early_pkts_tx_failed_woro++;
				} else
					app_stats.tx.early_pkts_txtd_woro++;
			} else if (ret == -1 && rte_errno == ENOSPC) {
				/*
				 * Early pkts just outside of window should be dropped
				 */
				rte_pktmbuf_free(mbufs[i]);
			}
		}
		/*
		 * drain MAX_PKTS_BURST of reordered
		 * mbufs for transmit
		 */
		dret = rte_reorder_drain(args->buffer, rombufs, MAX_PKTS_BURST);
		for (i = 0; i < dret; i++) {

			struct rte_eth_dev_tx_buffer *outbuf;
			uint8_t outp1;

			outp1 = rombufs[i]->port;
			/* skip ports that are not enabled */
			if ((portmask & (1 << outp1)) == 0) {
				rte_pktmbuf_free(rombufs[i]);
				continue;
			}

			outbuf = tx_buffer[outp1];
			sent = rte_eth_tx_buffer(outp1, 0, outbuf, rombufs[i]);
			if (sent)
				app_stats.tx.ro_tx_pkts += sent;
		}
	}

	free_tx_buffers(tx_buffer);

	return 0;
}
/**
 * Dequeue mbufs from the workers_to_tx ring and transmit them
 */
static int
tx_thread(struct rte_ring *ring_in)
{
	uint32_t i, dqnum = 0;
	uint8_t outp;
	unsigned int sent;
	struct rte_mbuf *mbufs[MAX_PKTS_BURST];
	struct rte_eth_dev_tx_buffer *outbuf;
	static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
							rte_lcore_id());

	configure_tx_buffers(tx_buffer);

	while (!quit_signal) {

		/* dequeue the mbufs from workers_to_tx ring */
		dqnum = rte_ring_dequeue_burst(ring_in,
				(void *)mbufs, MAX_PKTS_BURST);

		if (unlikely(dqnum == 0))
			continue;

		app_stats.tx.dequeue_pkts += dqnum;

		for (i = 0; i < dqnum; i++) {
			outp = mbufs[i]->port;
			/* skip ports that are not enabled */
			if ((portmask & (1 << outp)) == 0) {
				rte_pktmbuf_free(mbufs[i]);
				continue;
			}
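
			/*
			 * rte_eth_tx_buffer() only queues the mbuf; the
			 * buffered burst is flushed to the wire once
			 * MAX_PKTS_BURST packets have accumulated, so
			 * 'sent' is zero on most iterations.
			 */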
			outbuf = tx_buffer[outp];
			sent = rte_eth_tx_buffer(outp, 0, outbuf, mbufs[i]);
			if (sent)
				app_stats.tx.ro_tx_pkts += sent;
		}
	}

	free_tx_buffers(tx_buffer);

	return 0;
}
int
main(int argc, char **argv)
{
	int ret;
	unsigned int nb_ports;
	unsigned int lcore_id, last_lcore_id, master_lcore_id;
	uint8_t port_id;
	uint8_t nb_ports_available;
	struct worker_thread_args worker_args = {NULL, NULL};
	struct send_thread_args send_args = {NULL, NULL};
	struct rte_ring *rx_to_workers;
	struct rte_ring *workers_to_tx;

	/* catch ctrl-c so we can print on exit */
	signal(SIGINT, int_handler);
	/* Initialize EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return -1;

	argc -= ret;
	argv += ret;

	/* Parse the application specific arguments */
	ret = parse_args(argc, argv);
	if (ret < 0)
		return -1;
	/* Check if we have enough cores */
	if (rte_lcore_count() < 3)
		rte_exit(EXIT_FAILURE, "Error, this application needs at "
				"least 3 logical cores to run:\n"
				"1 lcore for packet RX\n"
				"1 lcore for packet TX\n"
				"and at least 1 lcore for worker threads\n");
	nb_ports = rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "Error: no ethernet ports detected\n");
	if (nb_ports != 1 && (nb_ports & 1))
		rte_exit(EXIT_FAILURE, "Error: number of ports must be even, except "
				"when using a single port\n");

	mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL,
			MBUF_POOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
	nb_ports_available = nb_ports;

	/* initialize all ports */
	for (port_id = 0; port_id < nb_ports; port_id++) {
		/* skip ports that are not enabled */
		if ((portmask & (1 << port_id)) == 0) {
			printf("\nSkipping disabled port %d\n", port_id);
			nb_ports_available--;
			continue;
		}
		/* init port */
		printf("Initializing port %u... done\n", (unsigned int) port_id);

		if (configure_eth_port(port_id) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize port %"PRIu8"\n",
					port_id);
	}

	if (!nb_ports_available) {
		rte_exit(EXIT_FAILURE,
			"All available ports are disabled. Please set portmask.\n");
	}
	/* Create rings for inter core communication */
	rx_to_workers = rte_ring_create("rx_to_workers", RING_SIZE,
			rte_socket_id(), RING_F_SP_ENQ);
	if (rx_to_workers == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	workers_to_tx = rte_ring_create("workers_to_tx", RING_SIZE,
			rte_socket_id(), RING_F_SC_DEQ);
	if (workers_to_tx == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
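
	/*
	 * Flag choice: rx_to_workers has a single producer (the RX core) but
	 * many consumers (the workers), so only RING_F_SP_ENQ is set;
	 * workers_to_tx has many producers but a single consumer (the TX
	 * core), so only RING_F_SC_DEQ is set.
	 */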
	if (!disable_reorder) {
		send_args.buffer = rte_reorder_create("PKT_RO", rte_socket_id(),
				REORDER_BUFFER_SIZE);
		if (send_args.buffer == NULL)
			rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
	}
	last_lcore_id   = get_last_lcore_id();
	master_lcore_id = rte_get_master_lcore();

	worker_args.ring_in  = rx_to_workers;
	worker_args.ring_out = workers_to_tx;

	/* Start worker_thread() on all the available slave cores but the last 1 */
	for (lcore_id = 0; lcore_id <= get_previous_lcore_id(last_lcore_id); lcore_id++)
		if (rte_lcore_is_enabled(lcore_id) && lcore_id != master_lcore_id)
			rte_eal_remote_launch(worker_thread, (void *)&worker_args,
					lcore_id);
	if (disable_reorder) {
		/* Start tx_thread() on the last slave core */
		rte_eal_remote_launch((lcore_function_t *)tx_thread, workers_to_tx,
				last_lcore_id);
	} else {
		send_args.ring_in = workers_to_tx;
		/* Start send_thread() on the last slave core */
		rte_eal_remote_launch((lcore_function_t *)send_thread,
				(void *)&send_args, last_lcore_id);
	}

	/* Start rx_thread() on the master core */
	rx_thread(rx_to_workers);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	print_stats();
	return 0;
}