/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <limits.h>
#include <getopt.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <net/if.h>
#include <netinet/in.h>

#ifdef RTE_EXEC_ENV_LINUXAPP
#include <linux/if_tun.h>
#endif

#include <sys/ioctl.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#if (RTE_MAX_LCORE > 64)
#define APP_MAX_LCORE 64
#else
#define APP_MAX_LCORE RTE_MAX_LCORE
#endif
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1
#define FATAL_ERROR(fmt, args...) rte_exit(EXIT_FAILURE, fmt "\n", ##args)
#define PRINT_INFO(fmt, args...) RTE_LOG(INFO, APP, fmt "\n", ##args)
/* Max ports that can be used (each port is associated with two lcores) */
#define MAX_PORTS (APP_MAX_LCORE / 2)
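/*
 * One lcore from the input mask forwards NIC -> tap for a port and one lcore
 * from the output mask forwards tap -> NIC for that port, hence the limit of
 * APP_MAX_LCORE / 2 usable ports.
 */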
/* Max size of a single packet */
#define MAX_PACKET_SZ (2048)

/* Size of the data buffer in each mbuf */
#define MBUF_DATA_SZ (MAX_PACKET_SZ + RTE_PKTMBUF_HEADROOM)
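/*
 * The RTE_PKTMBUF_HEADROOM term covers the headroom that DPDK reserves at the
 * start of every mbuf data buffer, so a full MAX_PACKET_SZ frame still fits
 * after the headroom.
 */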
/* Number of mbufs in mempool that is created */
#define NB_MBUF 8192

/* How many packets to attempt to read from NIC in one go */
#define PKT_BURST_SZ 32

/* How many objects (mbufs) to keep in per-lcore mempool cache */
#define MEMPOOL_CACHE_SZ PKT_BURST_SZ

/* Number of RX ring descriptors */
#define NB_RXD 1024

/* Number of TX ring descriptors */
#define NB_TXD 1024
/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
/* Options for configuring ethernet port */
static struct rte_eth_conf port_conf = {
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
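/*
 * init_port() copies this template into a local rte_eth_conf and adds
 * per-port offload flags based on what rte_eth_dev_info_get() reports.
 */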
/* Mempool for mbufs */
static struct rte_mempool *pktmbuf_pool = NULL;

/* Mask of enabled ports */
static uint32_t ports_mask = 0;

/* Mask of cores that read from NIC and write to tap */
static uint64_t input_cores_mask = 0;

/* Mask of cores that read from tap and write to NIC */
static uint64_t output_cores_mask = 0;

/* Array storing port_id that is associated with each lcore */
static uint16_t port_ids[APP_MAX_LCORE];
/* Structure type for recording lcore-specific stats */
struct stats {
    uint64_t rx;
    uint64_t tx;
    uint64_t dropped;
} __rte_cache_aligned;

/* Array of lcore-specific stats */
static struct stats lcore_stats[APP_MAX_LCORE];
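/*
 * Each lcore only updates its own entry, and struct stats is cache aligned so
 * counters owned by different lcores never share a cache line.
 */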
/* Print out statistics on packets handled */
static void
print_stats(void)
{
    unsigned i;

    printf("\n**Exception-Path example application statistics**\n"
           "=======  ======  ============  ============  ===============\n"
           " Lcore    Port            RX            TX  Dropped on TX\n"
           "-------  ------  ------------  ------------  ---------------\n");
    RTE_LCORE_FOREACH(i) {
        /* limit ourselves to application supported cores only */
        if (i >= APP_MAX_LCORE)
            break;
        printf("%6u %7u %13"PRIu64" %13"PRIu64" %16"PRIu64"\n",
               i, (unsigned)port_ids[i],
               lcore_stats[i].rx, lcore_stats[i].tx,
               lcore_stats[i].dropped);
    }
    printf("=======  ======  ============  ============  ===============\n");
}
/* Custom handling of signals to handle stats */
static void
signal_handler(int signum)
{
    /* When we receive a USR1 signal, print stats */
    if (signum == SIGUSR1) {
        print_stats();
    }

    /* When we receive a USR2 signal, reset stats */
    if (signum == SIGUSR2) {
        memset(&lcore_stats, 0, sizeof(lcore_stats));
        printf("\n**Statistics have been reset**\n");
    }
}
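/*
 * The counters can be driven from another terminal while the application is
 * running, for example:
 *
 *   kill -USR1 <pid>    # print the current statistics
 *   kill -USR2 <pid>    # reset the statistics
 */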
#ifdef RTE_EXEC_ENV_LINUXAPP
/*
 * Create a tap network interface, or use existing one with same name.
 * If name[0]='\0' then a name is automatically assigned and returned in name.
 */
static int tap_create(char *name)
{
    struct ifreq ifr;
    int fd, ret;

    fd = open("/dev/net/tun", O_RDWR);
    if (fd < 0)
        return fd;

    memset(&ifr, 0, sizeof(ifr));

    /* TAP device without packet information */
    ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
    if (name && *name)
        snprintf(ifr.ifr_name, IFNAMSIZ, "%s", name);

    ret = ioctl(fd, TUNSETIFF, (void *) &ifr);
    if (ret < 0) {
        close(fd);
        return ret;
    }

    if (name)
        snprintf(name, IFNAMSIZ, "%s", ifr.ifr_name);
    return fd;
}
#else
/*
 * Find a free tap network interface, or create a new one.
 * The name is automatically assigned and returned in name.
 */
static int tap_create(char *name)
{
    int i, fd = -1;
    char devname[PATH_MAX];

    for (i = 0; i < 255; i++) {
        snprintf(devname, sizeof(devname), "/dev/tap%d", i);
        fd = open(devname, O_RDWR);
        if (fd >= 0 || errno != EBUSY)
            break;
    }

    if (name)
        snprintf(name, IFNAMSIZ, "tap%d", i);
    return fd;
}
#endif
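/*
 * Note: creating or attaching to tap devices normally requires CAP_NET_ADMIN
 * (typically root), and on Linux the tun module must provide /dev/net/tun.
 */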
/* Main processing loop */
static int
main_loop(__attribute__((unused)) void *arg)
{
    const unsigned lcore_id = rte_lcore_id();
    char tap_name[IFNAMSIZ];
    int tap_fd;

    if ((1ULL << lcore_id) & input_cores_mask) {
        /* Create new tap interface */
        snprintf(tap_name, IFNAMSIZ, "tap_dpdk_%.2u", lcore_id);
        tap_fd = tap_create(tap_name);
        if (tap_fd < 0)
            FATAL_ERROR("Could not create tap interface \"%s\" (%d)",
                    tap_name, tap_fd);

        PRINT_INFO("Lcore %u is reading from port %u and writing to %s",
                lcore_id, (unsigned)port_ids[lcore_id], tap_name);
        fflush(stdout);

        /* Loop forever reading from NIC and writing to tap */
        for (;;) {
            struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
            unsigned i;
            const unsigned nb_rx =
                    rte_eth_rx_burst(port_ids[lcore_id], 0,
                            pkts_burst, PKT_BURST_SZ);
            lcore_stats[lcore_id].rx += nb_rx;
            for (i = 0; likely(i < nb_rx); i++) {
                struct rte_mbuf *m = pkts_burst[i];
                /* Ignore return val from write() */
                int ret = write(tap_fd,
                        rte_pktmbuf_mtod(m, void*),
                        rte_pktmbuf_data_len(m));
                rte_pktmbuf_free(m);
                if (unlikely(ret < 0))
                    lcore_stats[lcore_id].dropped++;
                else
                    lcore_stats[lcore_id].tx++;
            }
        }
    }
    else if ((1ULL << lcore_id) & output_cores_mask) {
        /* Create new tap interface */
        snprintf(tap_name, IFNAMSIZ, "tap_dpdk_%.2u", lcore_id);
        tap_fd = tap_create(tap_name);
        if (tap_fd < 0)
            FATAL_ERROR("Could not create tap interface \"%s\" (%d)",
                    tap_name, tap_fd);

        PRINT_INFO("Lcore %u is reading from %s and writing to port %u",
                lcore_id, tap_name, (unsigned)port_ids[lcore_id]);
        fflush(stdout);

        /* Loop forever reading from tap and writing to NIC */
        for (;;) {
            int ret;
            struct rte_mbuf *m = rte_pktmbuf_alloc(pktmbuf_pool);
            if (m == NULL)
                continue;

            ret = read(tap_fd, rte_pktmbuf_mtod(m, void *),
                    MAX_PACKET_SZ);
            lcore_stats[lcore_id].rx++;
            if (unlikely(ret < 0)) {
                FATAL_ERROR("Reading from %s interface failed",
                        tap_name);
            }

            m->pkt_len = (uint16_t)ret;
            m->data_len = (uint16_t)ret;
            ret = rte_eth_tx_burst(port_ids[lcore_id], 0, &m, 1);
            if (unlikely(ret < 1)) {
                rte_pktmbuf_free(m);
                lcore_stats[lcore_id].dropped++;
            }
            else {
                lcore_stats[lcore_id].tx++;
            }
        }
    }
    else {
        PRINT_INFO("Lcore %u has nothing to do", lcore_id);
        return 0;
    }

    /*
     * Tap file is closed automatically when program exits. Putting close()
     * here will cause the compiler to give an error about unreachable code.
     */
    return 0;
}
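/*
 * Each input lcore therefore forwards frames from its port to its own tap
 * interface, and each output lcore forwards frames from its own tap interface
 * to its port; the kernel sees one tap per lcore, named tap_dpdk_NN after the
 * lcore id.
 */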
/* Display usage instructions */
static void
print_usage(const char *prgname)
{
    PRINT_INFO("\nUsage: %s [EAL options] -- -p PORTMASK -i IN_CORES -o OUT_CORES\n"
               "    -p PORTMASK: hex bitmask of ports to use\n"
               "    -i IN_CORES: hex bitmask of cores which read from NIC\n"
               "    -o OUT_CORES: hex bitmask of cores which write to NIC",
               prgname);
}
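/*
 * Example invocation (the EAL arguments and binary path below are only
 * illustrative and depend on the local build/setup):
 *
 *   ./build/exception_path -l 0-3 -n 4 -- -p 0x3 -i 0x3 -o 0xc
 *
 * Here lcores 0-1 read from ports 0-1 and lcores 2-3 write to ports 0-1.
 */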
/* Convert string to unsigned number. 0 is returned if error occurs */
static uint64_t
parse_unsigned(const char *portmask)
{
    char *end = NULL;
    unsigned long long num;

    num = strtoull(portmask, &end, 16);
    if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
        return 0;

    return (uint64_t)num;
}
/* Record affinities between ports and lcores in global port_ids[] array */
static void
setup_port_lcore_affinities(void)
{
    unsigned i;
    uint16_t tx_port = 0;
    uint16_t rx_port = 0;

    /* Setup port_ids[] array, and check masks were ok */
    for (i = 0; i < APP_MAX_LCORE; i++) {
        if (!rte_lcore_is_enabled(i))
            continue;

        if (input_cores_mask & (1ULL << i)) {
            /* Skip ports that are not enabled */
            while ((ports_mask & (1 << rx_port)) == 0) {
                rx_port++;
                if (rx_port > (sizeof(ports_mask) * 8))
                    goto fail; /* not enough ports */
            }

            port_ids[i] = rx_port++;
        } else if (output_cores_mask & (1ULL << (i & 0x3f))) {
            /* Skip ports that are not enabled */
            while ((ports_mask & (1 << tx_port)) == 0) {
                tx_port++;
                if (tx_port > (sizeof(ports_mask) * 8))
                    goto fail; /* not enough ports */
            }

            port_ids[i] = tx_port++;
        }
    }

    if (rx_port != tx_port)
        goto fail; /* uneven number of cores in masks */

    if (ports_mask & (~((1 << rx_port) - 1)))
        goto fail; /* unused ports */

    return;
fail:
    FATAL_ERROR("Invalid core/port masks specified on command line");
}
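/*
 * The final checks enforce that every enabled port received exactly one input
 * lcore and one output lcore (rx_port == tx_port) and that the port mask does
 * not select ports beyond those that were assigned.
 */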
/* Parse the arguments given in the command line of the application */
static void
parse_args(int argc, char **argv)
{
    int opt;
    const char *prgname = argv[0];

    /* Disable printing messages within getopt() */
    opterr = 0;

    /* Parse command line */
    while ((opt = getopt(argc, argv, "i:o:p:")) != EOF) {
        switch (opt) {
        case 'i':
            input_cores_mask = parse_unsigned(optarg);
            break;
        case 'o':
            output_cores_mask = parse_unsigned(optarg);
            break;
        case 'p':
            ports_mask = parse_unsigned(optarg);
            break;
        default:
            print_usage(prgname);
            FATAL_ERROR("Invalid option specified");
        }
    }

    /* Check that options were parsed ok */
    if (input_cores_mask == 0) {
        print_usage(prgname);
        FATAL_ERROR("IN_CORES not specified correctly");
    }
    if (output_cores_mask == 0) {
        print_usage(prgname);
        FATAL_ERROR("OUT_CORES not specified correctly");
    }
    if (ports_mask == 0) {
        print_usage(prgname);
        FATAL_ERROR("PORTMASK not specified correctly");
    }

    setup_port_lcore_affinities();
}
/* Initialise a single port on an Ethernet device */
static void
init_port(uint16_t port)
{
    int ret;
    uint16_t nb_rxd = NB_RXD;
    uint16_t nb_txd = NB_TXD;
    struct rte_eth_dev_info dev_info;
    struct rte_eth_rxconf rxq_conf;
    struct rte_eth_txconf txq_conf;
    struct rte_eth_conf local_port_conf = port_conf;

    /* Initialise device and RX/TX queues */
    PRINT_INFO("Initialising port %u ...", port);
    fflush(stdout);
    rte_eth_dev_info_get(port, &dev_info);
    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
        local_port_conf.txmode.offloads |=
            DEV_TX_OFFLOAD_MBUF_FAST_FREE;
    ret = rte_eth_dev_configure(port, 1, 1, &local_port_conf);
    if (ret < 0)
        FATAL_ERROR("Could not configure port%u (%d)", port, ret);

    ret = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
    if (ret < 0)
        FATAL_ERROR("Could not adjust number of descriptors for port%u (%d)",
                port, ret);

    rxq_conf = dev_info.default_rxconf;
    rxq_conf.offloads = local_port_conf.rxmode.offloads;
    ret = rte_eth_rx_queue_setup(port, 0, nb_rxd,
                rte_eth_dev_socket_id(port),
                &rxq_conf,
                pktmbuf_pool);
    if (ret < 0)
        FATAL_ERROR("Could not set up RX queue for port%u (%d)",
                port, ret);

    txq_conf = dev_info.default_txconf;
    txq_conf.offloads = local_port_conf.txmode.offloads;
    ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
                rte_eth_dev_socket_id(port),
                &txq_conf);
    if (ret < 0)
        FATAL_ERROR("Could not set up TX queue for port%u (%d)",
                port, ret);

    ret = rte_eth_dev_start(port);
    if (ret < 0)
        FATAL_ERROR("Could not start port%u (%d)", port, ret);

    rte_eth_promiscuous_enable(port);
}
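/*
 * Only a single RX queue and a single TX queue (queue 0) are set up per port,
 * which matches the model of one input lcore and one output lcore per port,
 * and the port is left in promiscuous mode so every incoming frame is
 * delivered to the tap interface.
 */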
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
    uint16_t portid;
    uint8_t count, all_ports_up, print_flag = 0;
    struct rte_eth_link link;

    printf("\nChecking link status");
    fflush(stdout);
    for (count = 0; count <= MAX_CHECK_TIME; count++) {
        all_ports_up = 1;
        RTE_ETH_FOREACH_DEV(portid) {
            if ((port_mask & (1 << portid)) == 0)
                continue;
            memset(&link, 0, sizeof(link));
            rte_eth_link_get_nowait(portid, &link);
            /* print link status if flag set */
            if (print_flag == 1) {
                if (link.link_status)
                    printf("Port%d Link Up. Speed %u Mbps - %s\n",
                           portid, link.link_speed,
                           (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                           ("full-duplex") : ("half-duplex"));
                else
                    printf("Port %d Link Down\n", portid);
                continue;
            }
            /* clear all_ports_up flag if any link down */
            if (link.link_status == ETH_LINK_DOWN) {
                all_ports_up = 0;
                break;
            }
        }
        /* after finally printing all link status, get out */
        if (print_flag == 1)
            break;

        if (all_ports_up == 0) {
            printf(".");
            fflush(stdout);
            rte_delay_ms(CHECK_INTERVAL);
        }

        /* set the print_flag if all ports up or timeout */
        if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
            print_flag = 1;
            printf("done\n");
        }
    }
}
/* Initialise ports/queues etc. and start main loop on each core */
int
main(int argc, char **argv)
{
    int ret;
    unsigned i, high_port;
    uint16_t nb_sys_ports, port;

    /* Associate signal_handler function with USR signals */
    signal(SIGUSR1, signal_handler);
    signal(SIGUSR2, signal_handler);

    /* Initialise EAL */
    ret = rte_eal_init(argc, argv);
    if (ret < 0)
        FATAL_ERROR("Could not initialise EAL (%d)", ret);
    argc -= ret;
    argv += ret;

    /* Parse application arguments (after the EAL ones) */
    parse_args(argc, argv);

    /* Create the mbuf pool */
    pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
            MEMPOOL_CACHE_SZ, 0, MBUF_DATA_SZ, rte_socket_id());
    if (pktmbuf_pool == NULL) {
        FATAL_ERROR("Could not initialise mbuf pool");
        return -1;
    }

    /* Get number of ports found in scan */
    nb_sys_ports = rte_eth_dev_count_avail();
    if (nb_sys_ports == 0)
        FATAL_ERROR("No supported Ethernet device found");
    /* Find highest port set in portmask */
    for (high_port = (sizeof(ports_mask) * 8) - 1;
            (high_port != 0) && !(ports_mask & (1 << high_port));
            high_port--)
        ; /* empty body */
    if (high_port > nb_sys_ports)
        FATAL_ERROR("Port mask requires more ports than available");

    /* Initialise each port */
    RTE_ETH_FOREACH_DEV(port) {
        /* Skip ports that are not enabled */
        if ((ports_mask & (1 << port)) == 0) {
            continue;
        }
        init_port(port);
    }
    check_all_ports_link_status(ports_mask);

    /* Launch per-lcore function on every lcore */
    rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
    RTE_LCORE_FOREACH_SLAVE(i) {
        if (rte_eal_wait_lcore(i) < 0)
            return -1;
    }

    return 0;
}