/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <unistd.h>
#include <signal.h>
#include <getopt.h>

#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_debug.h>
#include <rte_prefetch.h>
#include <rte_distributor.h>

#define RX_RING_SIZE 256
#define TX_RING_SIZE 512
#define NUM_MBUFS ((64*1024)-1)
#define MBUF_CACHE_SIZE 250
#define BURST_SIZE 32
#define RTE_RING_SZ 1024

#define RTE_LOGTYPE_DISTRAPP RTE_LOGTYPE_USER1

/* mask of enabled ports */
static uint32_t enabled_port_mask;
volatile uint8_t quit_signal;
volatile uint8_t quit_signal_rx;

static volatile struct app_stats {
	struct {
		uint64_t rx_pkts;
		uint64_t returned_pkts;
		uint64_t enqueued_pkts;
	} rx __rte_cache_aligned;

	struct {
		uint64_t dequeue_pkts;
		uint64_t tx_pkts;
	} tx __rte_cache_aligned;
} app_stats;

static const struct rte_eth_conf port_conf_default = {
	.rxmode = {
		.mq_mode = ETH_MQ_RX_RSS,
		.max_rx_pkt_len = ETHER_MAX_LEN,
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
				ETH_RSS_TCP | ETH_RSS_SCTP,
		}
	},
};

struct output_buffer {
	unsigned count;
	struct rte_mbuf *mbufs[BURST_SIZE];
};

/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf port_conf = port_conf_default;
	const uint16_t rxRings = 1, txRings = rte_lcore_count() - 1;
	int retval;
	uint16_t q;

	if (port >= rte_eth_dev_count())
		return -1;

	retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
	if (retval != 0)
		return retval;

	for (q = 0; q < rxRings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
						rte_eth_dev_socket_id(port),
						NULL, mbuf_pool);
		if (retval < 0)
			return retval;
	}

	for (q = 0; q < txRings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
						rte_eth_dev_socket_id(port),
						NULL);
		if (retval < 0)
			return retval;
	}

	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* give the link a second to come up before reporting its state */
	struct rte_eth_link link;
	rte_eth_link_get_nowait(port, &link);
	if (!link.link_status) {
		sleep(1);
		rte_eth_link_get_nowait(port, &link);
	}
	if (!link.link_status) {
		printf("Link down on port %"PRIu8"\n", port);
		return 0;
	}

	struct ether_addr addr;
	rte_eth_macaddr_get(port, &addr);
	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	rte_eth_promiscuous_enable(port);

	return 0;
}

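/*
 * Parameters handed to each launched lcore: the worker id, the distributor,
 * the ring feeding the TX thread and the mbuf pool used during shutdown.
 */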
struct lcore_params {
	unsigned worker_id;
	struct rte_distributor *d;
	struct rte_ring *r;
	struct rte_mempool *mem_pool;
};

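/*
 * At shutdown the workers may still be blocked waiting on the distributor.
 * Push one dummy mbuf per worker (with distinct RSS hashes so each worker
 * receives one) so that every worker wakes up, sees quit_signal and exits.
 */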
static int
quit_workers(struct rte_distributor *d, struct rte_mempool *p)
{
	const unsigned num_workers = rte_lcore_count() - 2;
	unsigned i;
	struct rte_mbuf *bufs[num_workers];

	rte_mempool_get_bulk(p, (void *)bufs, num_workers);

	for (i = 0; i < num_workers; i++)
		bufs[i]->hash.rss = i << 1;

	rte_distributor_process(d, bufs, num_workers);
	rte_mempool_put_bulk(p, (void *)bufs, num_workers);

	return 0;
}

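/*
 * RX thread: poll each enabled port in turn, hand received bursts to the
 * distributor, collect the packets the workers have finished with and
 * enqueue them on the ring read by the TX thread.
 */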
static int
lcore_rx(struct lcore_params *p)
{
	struct rte_distributor *d = p->d;
	struct rte_mempool *mem_pool = p->mem_pool;
	struct rte_ring *r = p->r;
	const uint8_t nb_ports = rte_eth_dev_count();
	const int socket_id = rte_socket_id();
	uint8_t port;

	for (port = 0; port < nb_ports; port++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << port)) == 0)
			continue;

		if (rte_eth_dev_socket_id(port) > 0 &&
				rte_eth_dev_socket_id(port) != socket_id)
			printf("WARNING, port %u is on remote NUMA node to "
					"RX thread.\n\tPerformance will not "
					"be optimal.\n", port);
	}

	printf("\nCore %u doing packet RX.\n", rte_lcore_id());
	port = 0;
	while (!quit_signal_rx) {

		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << port)) == 0) {
			if (++port == nb_ports)
				port = 0;
			continue;
		}

		struct rte_mbuf *bufs[BURST_SIZE*2];
		const uint16_t nb_rx = rte_eth_rx_burst(port, 0, bufs,
				BURST_SIZE);
		app_stats.rx.rx_pkts += nb_rx;

		rte_distributor_process(d, bufs, nb_rx);
		const uint16_t nb_ret = rte_distributor_returned_pkts(d,
				bufs, BURST_SIZE*2);
		app_stats.rx.returned_pkts += nb_ret;
		if (unlikely(nb_ret == 0)) {
			if (++port == nb_ports)
				port = 0;
			continue;
		}

		uint16_t sent = rte_ring_enqueue_burst(r, (void *)bufs, nb_ret);
		app_stats.rx.enqueued_pkts += sent;
		if (unlikely(sent < nb_ret)) {
			RTE_LOG(DEBUG, DISTRAPP,
				"%s:Packet loss due to full ring\n", __func__);
			while (sent < nb_ret)
				rte_pktmbuf_free(bufs[sent++]);
		}
		if (++port == nb_ports)
			port = 0;
	}

	rte_distributor_process(d, NULL, 0);
	/* flush distributor to bring it to a known state */
	rte_distributor_flush(d);
	/* set worker & tx threads quit flag */
	quit_signal = 1;
	/*
	 * Workers may be blocked in rte_distributor_get_pkt() now that the
	 * distributor is no longer processing; keep feeding them packets
	 * until quit_signal has actually been seen and they have shut down
	 * gracefully.
	 */
	quit_workers(d, mem_pool);
	/* rx thread should quit last */
	return 0;
}

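/* Transmit the packets buffered for one port, freeing any the NIC rejects. */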
static inline void
flush_one_port(struct output_buffer *outbuf, uint8_t outp)
{
	unsigned nb_tx = rte_eth_tx_burst(outp, 0, outbuf->mbufs,
			outbuf->count);
	app_stats.tx.tx_pkts += nb_tx;

	if (unlikely(nb_tx < outbuf->count)) {
		RTE_LOG(DEBUG, DISTRAPP,
			"%s:Packet loss with tx_burst\n", __func__);
		do {
			rte_pktmbuf_free(outbuf->mbufs[nb_tx]);
		} while (++nb_tx < outbuf->count);
	}
	outbuf->count = 0;
}

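/* Flush the TX buffer of every enabled port. */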
static inline void
flush_all_ports(struct output_buffer *tx_buffers, uint8_t nb_ports)
{
	uint8_t outp;

	for (outp = 0; outp < nb_ports; outp++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << outp)) == 0)
			continue;

		if (tx_buffers[outp].count == 0)
			continue;

		flush_one_port(&tx_buffers[outp], outp);
	}
}

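/*
 * TX thread: dequeue processed packets from the ring, batch them per output
 * port and transmit full bursts, flushing all buffers whenever the ring
 * runs dry.
 */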
static int
lcore_tx(struct rte_ring *in_r)
{
	static struct output_buffer tx_buffers[RTE_MAX_ETHPORTS];
	const uint8_t nb_ports = rte_eth_dev_count();
	const int socket_id = rte_socket_id();
	uint8_t port;

	for (port = 0; port < nb_ports; port++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << port)) == 0)
			continue;

		if (rte_eth_dev_socket_id(port) > 0 &&
				rte_eth_dev_socket_id(port) != socket_id)
			printf("WARNING, port %u is on remote NUMA node to "
					"TX thread.\n\tPerformance will not "
					"be optimal.\n", port);
	}

	printf("\nCore %u doing packet TX.\n", rte_lcore_id());
	while (!quit_signal) {

		for (port = 0; port < nb_ports; port++) {
			/* skip ports that are not enabled */
			if ((enabled_port_mask & (1 << port)) == 0)
				continue;

			struct rte_mbuf *bufs[BURST_SIZE];
			const uint16_t nb_rx = rte_ring_dequeue_burst(in_r,
					(void *)bufs, BURST_SIZE);
			app_stats.tx.dequeue_pkts += nb_rx;

			/* if we get no traffic, flush anything we have */
			if (unlikely(nb_rx == 0)) {
				flush_all_ports(tx_buffers, nb_ports);
				continue;
			}

			/* for traffic we receive, queue it up for transmit */
			uint16_t i;
			rte_prefetch_non_temporal((void *)bufs[0]);
			rte_prefetch_non_temporal((void *)bufs[1]);
			rte_prefetch_non_temporal((void *)bufs[2]);
			for (i = 0; i < nb_rx; i++) {
				struct output_buffer *outbuf;
				uint8_t outp;

				/* only prefetch entries the burst actually filled */
				if (i + 3 < nb_rx)
					rte_prefetch_non_temporal((void *)bufs[i + 3]);
				/*
				 * workers should update in_port to hold the
				 * output port value
				 */
				outp = bufs[i]->port;
				/* skip ports that are not enabled */
				if ((enabled_port_mask & (1 << outp)) == 0)
					continue;

				outbuf = &tx_buffers[outp];
				outbuf->mbufs[outbuf->count++] = bufs[i];
				if (outbuf->count == BURST_SIZE)
					flush_one_port(outbuf, outp);
			}
		}
	}
	return 0;
}

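/* SIGINT handler: ask the RX thread to stop; it then shuts down the rest. */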
static void
int_handler(int sig_num)
{
	printf("Exiting on signal %d\n", sig_num);
	/* set quit flag for rx thread to exit */
	quit_signal_rx = 1;
}

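/* Print the application counters plus the per-port NIC statistics. */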
static void
print_stats(void)
{
	struct rte_eth_stats eth_stats;
	unsigned i;

	printf("\nRX thread stats:\n");
	printf(" - Received:    %"PRIu64"\n", app_stats.rx.rx_pkts);
	printf(" - Processed:   %"PRIu64"\n", app_stats.rx.returned_pkts);
	printf(" - Enqueued:    %"PRIu64"\n", app_stats.rx.enqueued_pkts);

	printf("\nTX thread stats:\n");
	printf(" - Dequeued:    %"PRIu64"\n", app_stats.tx.dequeue_pkts);
	printf(" - Transmitted: %"PRIu64"\n", app_stats.tx.tx_pkts);

	for (i = 0; i < rte_eth_dev_count(); i++) {
		rte_eth_stats_get(i, &eth_stats);
		printf("\nPort %u stats:\n", i);
		printf(" - Pkts in:   %"PRIu64"\n", eth_stats.ipackets);
		printf(" - Pkts out:  %"PRIu64"\n", eth_stats.opackets);
		printf(" - In Errs:   %"PRIu64"\n", eth_stats.ierrors);
		printf(" - Out Errs:  %"PRIu64"\n", eth_stats.oerrors);
		printf(" - Mbuf Errs: %"PRIu64"\n", eth_stats.rx_nombuf);
	}
}

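/*
 * Worker thread: repeatedly exchange the previously handled packet for a new
 * one from the distributor and set its output port (pairing 0<->1, 2<->3, ...
 * when more than one port is in use).
 */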
static int
lcore_worker(struct lcore_params *p)
{
	struct rte_distributor *d = p->d;
	const unsigned id = p->worker_id;
	/*
	 * for a single port, xor_val will be zero so we won't modify the output
	 * port, otherwise we send traffic from 0 to 1, 2 to 3, and vice versa
	 */
	const unsigned xor_val = (rte_eth_dev_count() > 1);
	struct rte_mbuf *buf = NULL;

	printf("\nCore %u acting as worker core.\n", rte_lcore_id());
	while (!quit_signal) {
		buf = rte_distributor_get_pkt(d, id, buf);
		buf->port ^= xor_val;
	}
	return 0;
}

/* display usage */
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
			"  -p PORTMASK: hexadecimal bitmask of ports to configure\n",
			prgname);
}

static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}

/* Parse the arguments given on the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:",
			lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;

		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind <= 1) {
		print_usage(prgname);
		return -1;
	}

	argv[optind-1] = prgname;

	optind = 0; /* reset getopt lib */
	return 0;
}

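/*
 * Example invocation (binary name and EAL flags are illustrative only;
 * adjust the core mask, memory channels and portmask to the local setup):
 *
 *     ./build/distributor_app -c 0x1f -n 4 -- -p 0x3
 *
 * One lcore does RX/distribution, one does TX, the rest act as workers.
 */
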
/* Main function, does initialization and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool;
	struct rte_distributor *d;
	struct rte_ring *output_ring;
	unsigned lcore_id, worker_id = 0;
	unsigned nb_ports;
	uint8_t portid;
	uint8_t nb_ports_available;

	/* catch ctrl-c so we can print stats on exit */
	signal(SIGINT, int_handler);

	/* init EAL */
	int ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid distributor parameters\n");

	if (rte_lcore_count() < 3)
		rte_exit(EXIT_FAILURE, "Error, this application needs at "
				"least 3 logical cores to run:\n"
				"1 lcore for packet RX and distribution\n"
				"1 lcore for packet TX\n"
				"and at least 1 lcore for worker threads\n");

	nb_ports = rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "Error: no ethernet ports detected\n");
	if (nb_ports != 1 && (nb_ports & 1))
		rte_exit(EXIT_FAILURE, "Error: number of ports must be even, except "
				"when using a single port\n");

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
			NUM_MBUFS * nb_ports, MBUF_CACHE_SIZE, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	nb_ports_available = nb_ports;

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			nb_ports_available--;
			continue;
		}
		/* init port */
		printf("Initializing port %u... done\n", (unsigned) portid);

		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize port %"PRIu8"\n",
					portid);
	}

	if (!nb_ports_available) {
		rte_exit(EXIT_FAILURE,
				"All available ports are disabled. Please set portmask.\n");
	}

	d = rte_distributor_create("PKT_DIST", rte_socket_id(),
			rte_lcore_count() - 2);
	if (d == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create distributor\n");

	/*
	 * scheduler ring is read only by the transmitter core, but written to
	 * by multiple threads
	 */
	output_ring = rte_ring_create("Output_ring", RTE_RING_SZ,
			rte_socket_id(), RING_F_SC_DEQ);
	if (output_ring == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create output ring\n");

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (worker_id == rte_lcore_count() - 2)
			rte_eal_remote_launch((lcore_function_t *)lcore_tx,
					output_ring, lcore_id);
		else {
			struct lcore_params *p =
					rte_malloc(NULL, sizeof(*p), 0);
			if (!p)
				rte_panic("malloc failure\n");
			*p = (struct lcore_params){worker_id, d, output_ring, mbuf_pool};

			rte_eal_remote_launch((lcore_function_t *)lcore_worker,
					p, lcore_id);
		}
		worker_id++;
	}

	/* run the RX/distribution loop on the master core only */
	struct lcore_params p = { 0, d, output_ring, mbuf_pool};
	lcore_rx(&p);

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	print_stats();
	return 0;
}