1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
10 #include <sys/queue.h>
15 #include <netinet/in.h>
17 #ifdef RTE_EXEC_ENV_LINUXAPP
18 #include <linux/if_tun.h>
21 #include <sys/ioctl.h>
25 #include <rte_common.h>
27 #include <rte_memory.h>
28 #include <rte_memcpy.h>
30 #include <rte_per_lcore.h>
31 #include <rte_launch.h>
32 #include <rte_atomic.h>
33 #include <rte_lcore.h>
34 #include <rte_branch_prediction.h>
35 #include <rte_interrupts.h>
36 #include <rte_debug.h>
37 #include <rte_ether.h>
38 #include <rte_ethdev.h>
39 #include <rte_mempool.h>
41 #include <rte_string_fns.h>
42 #include <rte_cycles.h>
45 #if (RTE_MAX_LCORE > 64)
46 #define APP_MAX_LCORE 64
48 #define APP_MAX_LCORE RTE_MAX_LCORE
52 /* Macros for printing using RTE_LOG */
53 #define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1
54 #define FATAL_ERROR(fmt, args...) rte_exit(EXIT_FAILURE, fmt "\n", ##args)
55 #define PRINT_INFO(fmt, args...) RTE_LOG(INFO, APP, fmt "\n", ##args)
57 /* Max ports that can be used (each port is associated with two lcores) */
58 #define MAX_PORTS (APP_MAX_LCORE / 2)
60 /* Max size of a single packet */
61 #define MAX_PACKET_SZ (2048)
63 /* Size of the data buffer in each mbuf */
64 #define MBUF_DATA_SZ (MAX_PACKET_SZ + RTE_PKTMBUF_HEADROOM)
66 /* Number of mbufs in mempool that is created */
69 /* How many packets to attempt to read from NIC in one go */
70 #define PKT_BURST_SZ 32
72 /* How many objects (mbufs) to keep in per-lcore mempool cache */
73 #define MEMPOOL_CACHE_SZ PKT_BURST_SZ
75 /* Number of RX ring descriptors */
78 /* Number of TX ring descriptors */
82 * RX and TX Prefetch, Host, and Write-back threshold values should be
83 * carefully set for optimal performance. Consult the network
84 * controller's datasheet and supporting DPDK documentation for guidance
85 * on how these parameters should be set.
88 /* Options for configuring ethernet port */
89 static struct rte_eth_conf port_conf = {
/* Rx: use the legacy per-queue offload API and strip the Ethernet CRC in
 * hardware. NOTE(review): the .rxmode/.txmode sub-struct braces are not
 * visible in this excerpt -- the initializer appears truncated here. */
91 .ignore_offload_bitfield = 1,
92 .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
/* Tx: single queue, no multi-queue distribution. */
95 .mq_mode = ETH_MQ_TX_NONE,
99 /* Mempool for mbufs, shared by all lcores; created once in main(). */
100 static struct rte_mempool * pktmbuf_pool = NULL;
102 /* Mask of enabled ports (set from the -p command-line option) */
103 static uint32_t ports_mask = 0;
105 /* Mask of cores that read from NIC and write to tap (-i option) */
106 static uint64_t input_cores_mask = 0;
108 /* Mask of cores that read from tap and write to NIC (-o option) */
109 static uint64_t output_cores_mask = 0;
111 /* Array storing port_id that is associated with each lcore */
112 static uint16_t port_ids[APP_MAX_LCORE];
114 /* Structure type for recording lcore-specific stats.
 * NOTE(review): the field list (original lines 115-118) is not visible in
 * this excerpt; usage elsewhere in the file shows at least the per-lcore
 * counters .rx, .tx and .dropped. Cache-aligned to avoid false sharing
 * between lcores updating adjacent entries. */
119 } __rte_cache_aligned;
121 /* Array of lcore-specific stats, indexed by lcore_id */
122 static struct stats lcore_stats[APP_MAX_LCORE];
124 /* Print out statistics on packets handled */
/* NOTE(review): the function signature (presumably print_stats) is not
 * visible in this excerpt. The body prints a fixed-width table with one
 * row per enabled lcore: its rx, tx and dropped-on-tx counters. */
130 printf("\n**Exception-Path example application statistics**\n"
131 "======= ====== ============ ============ ===============\n"
132 " Lcore Port RX TX Dropped on TX\n"
133 "------- ------ ------------ ------------ ---------------\n");
/* Walk every enabled lcore (including the master) and dump its counters. */
134 RTE_LCORE_FOREACH(i) {
135 printf("%6u %7u %13"PRIu64" %13"PRIu64" %16"PRIu64"\n",
136 i, (unsigned)port_ids[i],
137 lcore_stats[i].rx, lcore_stats[i].tx,
138 lcore_stats[i].dropped);
/* NOTE(review): the loop's closing brace (original line 139) is not
 * visible in this excerpt. */
140 printf("======= ====== ============ ============ ===============\n");
143 /* Custom handling of signals to handle stats:
 * SIGUSR1 dumps the current statistics table, SIGUSR2 resets all counters.
 * NOTE(review): the return type line and the print_stats() call inside the
 * SIGUSR1 branch are not visible in this excerpt. */
145 signal_handler(int signum)
147 /* When we receive a USR1 signal, print stats */
148 if (signum == SIGUSR1) {
152 /* When we receive a USR2 signal, reset stats */
153 if (signum == SIGUSR2) {
/* Zero every lcore's counters in one shot; lcores keep running and will
 * continue incrementing from zero. */
154 memset(&lcore_stats, 0, sizeof(lcore_stats));
155 printf("\n**Statistics have been reset**\n");
160 #ifdef RTE_EXEC_ENV_LINUXAPP
/* Linux implementation: uses the /dev/net/tun clone device and the
 * TUNSETIFF ioctl. Returns the tap file descriptor on success; the error
 * paths (original lines 171-173, 183-188) are not visible in this excerpt. */
162 * Create a tap network interface, or use existing one with same name.
163 * If name[0]='\0' then a name is automatically assigned and returned in name.
165 static int tap_create(char *name)
170 fd = open("/dev/net/tun", O_RDWR);
174 memset(&ifr, 0, sizeof(ifr));
176 /* TAP device without packet information */
177 ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
/* Request the caller-supplied name (kernel auto-assigns if empty). */
180 snprintf(ifr.ifr_name, IFNAMSIZ, "%s", name);
182 ret = ioctl(fd, TUNSETIFF, (void *) &ifr);
/* Report back the name the kernel actually assigned. */
189 snprintf(name, IFNAMSIZ, "%s", ifr.ifr_name);
195 * Find a free tap network interface, or create a new one.
196 * The name is automatically assigned and returned in name.
/* Non-Linux (BSD-style) implementation: probe /dev/tapN device nodes in
 * order until one opens (or fails with something other than EBUSY).
 * NOTE(review): the surrounding #else/#endif and the return statement are
 * not visible in this excerpt. */
198 static int tap_create(char *name)
201 char devname[PATH_MAX];
203 for (i = 0; i < 255; i++) {
204 snprintf(devname, sizeof(devname), "/dev/tap%d", i);
205 fd = open(devname, O_RDWR);
/* Stop on first open success, or on any error other than "in use". */
206 if (fd >= 0 || errno != EBUSY)
/* Report the interface name that corresponds to the opened device node. */
211 snprintf(name, IFNAMSIZ, "tap%d", i);
217 /* Main processing loop.
 *
 * Launched on every lcore. Depending on which mask the lcore appears in:
 *  - input cores: poll their NIC port and write each received frame to a
 *    per-lcore tap interface ("tap_dpdk_NN");
 *  - output cores: read frames from the tap interface and transmit them on
 *    their NIC port;
 *  - other cores: log a message and (presumably) return.
 * Never returns for input/output cores.
 * NOTE(review): this excerpt omits interior lines (declarations of tap_fd,
 * i, ret, the for(;;) loop headers and several braces/else branches). */
219 main_loop(__attribute__((unused)) void *arg)
221 const unsigned lcore_id = rte_lcore_id();
222 char tap_name[IFNAMSIZ];
/* Input path: NIC -> tap. */
225 if ((1ULL << lcore_id) & input_cores_mask) {
226 /* Create new tap interface */
227 snprintf(tap_name, IFNAMSIZ, "tap_dpdk_%.2u", lcore_id);
228 tap_fd = tap_create(tap_name);
230 FATAL_ERROR("Could not create tap interface \"%s\" (%d)",
233 PRINT_INFO("Lcore %u is reading from port %u and writing to %s",
234 lcore_id, (unsigned)port_ids[lcore_id], tap_name);
236 /* Loop forever reading from NIC and writing to tap */
238 struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
240 const unsigned nb_rx =
241 rte_eth_rx_burst(port_ids[lcore_id], 0,
242 pkts_burst, PKT_BURST_SZ);
243 lcore_stats[lcore_id].rx += nb_rx;
244 for (i = 0; likely(i < nb_rx); i++) {
245 struct rte_mbuf *m = pkts_burst[i];
246 /* Ignore return val from write() */
247 int ret = write(tap_fd,
248 rte_pktmbuf_mtod(m, void*),
249 rte_pktmbuf_data_len(m));
/* NOTE(review): the rte_pktmbuf_free(m) call expected here (original
 * line 250) is not visible in this excerpt -- confirm the mbuf is freed
 * after the write, otherwise the pool would be exhausted. */
251 if (unlikely(ret < 0))
252 lcore_stats[lcore_id].dropped++;
254 lcore_stats[lcore_id].tx++;
/* Output path: tap -> NIC. */
258 else if ((1ULL << lcore_id) & output_cores_mask) {
259 /* Create new tap interface */
260 snprintf(tap_name, IFNAMSIZ, "tap_dpdk_%.2u", lcore_id);
261 tap_fd = tap_create(tap_name);
263 FATAL_ERROR("Could not create tap interface \"%s\" (%d)",
266 PRINT_INFO("Lcore %u is reading from %s and writing to port %u",
267 lcore_id, tap_name, (unsigned)port_ids[lcore_id]);
269 /* Loop forever reading from tap and writing to NIC */
/* Allocate one mbuf per frame; allocation-failure handling (original
 * lines 273-275) is not visible in this excerpt. */
272 struct rte_mbuf *m = rte_pktmbuf_alloc(pktmbuf_pool);
/* read() blocks until a full frame is available on the tap fd. */
276 ret = read(tap_fd, rte_pktmbuf_mtod(m, void *),
278 lcore_stats[lcore_id].rx++;
279 if (unlikely(ret < 0)) {
280 FATAL_ERROR("Reading from %s interface failed",
/* Single-segment frame: packet length equals data length. */
285 m->pkt_len = (uint16_t)ret;
286 m->data_len = (uint16_t)ret;
287 ret = rte_eth_tx_burst(port_ids[lcore_id], 0, &m, 1);
/* tx_burst returns the number of packets actually queued; < 1 means the
 * frame was not accepted and must be counted as dropped (and presumably
 * freed -- the free call is not visible in this excerpt). */
288 if (unlikely(ret < 1)) {
290 lcore_stats[lcore_id].dropped++;
293 lcore_stats[lcore_id].tx++;
298 PRINT_INFO("Lcore %u has nothing to do", lcore_id);
302 * Tap file is closed automatically when program exits. Putting close()
303 * here will cause the compiler to give an error about unreachable code.
307 /* Display usage instructions.
 * NOTE(review): the return type line and the closing argument/brace of the
 * PRINT_INFO call (original lines 315-316) are not visible in this excerpt;
 * prgname is presumably the trailing argument for the %s. */
309 print_usage(const char *prgname)
311 PRINT_INFO("\nUsage: %s [EAL options] -- -p PORTMASK -i IN_CORES -o OUT_CORES\n"
312 " -p PORTMASK: hex bitmask of ports to use\n"
313 " -i IN_CORES: hex bitmask of cores which read from NIC\n"
314 " -o OUT_CORES: hex bitmask of cores which write to NIC",
318 /* Convert string to unsigned number. 0 is returned if error occurs.
 * NOTE(review): the return-type line, local declarations (num, end) and the
 * error "return 0;" (original lines 327-328) are not visible here. */
320 parse_unsigned(const char *portmask)
/* Masks are given in hex on the command line, hence base 16. */
325 num = strtoull(portmask, &end, 16);
/* Reject empty input and trailing garbage: end must reach the NUL. */
326 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
329 return (uint64_t)num;
332 /* Record affinities between ports and lcores in global port_ids[] array.
 *
 * Walks every enabled lcore: input cores are paired with the enabled ports
 * in ascending order via rx_port, output cores likewise via tx_port. Each
 * enabled port therefore ends up with exactly one input and one output
 * lcore. Terminates the program on any mismatch.
 * NOTE(review): increments inside the while loops (rx_port++/tx_port++,
 * original lines 347/356) and several braces are not visible here. */
334 setup_port_lcore_affinities(void)
337 uint16_t tx_port = 0;
338 uintint16_t rx_port = 0;
340 /* Setup port_ids[] array, and check masks were ok */
341 for (i = 0; i < APP_MAX_LCORE; i++) {
342 if (!rte_lcore_is_enabled(i))
344 if (input_cores_mask & (1ULL << i)) {
345 /* Skip ports that are not enabled */
346 while ((ports_mask & (1 << rx_port)) == 0) {
/* NOTE(review): when rx_port reaches 32 the (1 << rx_port) shift above is
 * out of range for the 32-bit mask (undefined behavior) before this bound
 * check fires -- confirm against the full source. */
348 if (rx_port > (sizeof(ports_mask) * 8))
349 goto fail; /* not enough ports */
352 port_ids[i] = rx_port++;
/* NOTE(review): the input branch uses (1ULL << i) while this one masks
 * with (i & 0x3f) -- inconsistent; harmless while APP_MAX_LCORE <= 64 but
 * worth unifying. */
353 } else if (output_cores_mask & (1ULL << (i & 0x3f))) {
354 /* Skip ports that are not enabled */
355 while ((ports_mask & (1 << tx_port)) == 0) {
357 if (tx_port > (sizeof(ports_mask) * 8))
358 goto fail; /* not enough ports */
361 port_ids[i] = tx_port++;
/* Sanity checks after assignment: every input core must have a matching
 * output core, and the port mask must not name unused ports. */
365 if (rx_port != tx_port)
366 goto fail; /* uneven number of cores in masks */
368 if (ports_mask & (~((1 << rx_port) - 1)))
369 goto fail; /* unused ports */
373 FATAL_ERROR("Invalid core/port masks specified on command line");
376 /* Parse the arguments given in the command line of the application.
 *
 * Fills input_cores_mask (-i), output_cores_mask (-o) and ports_mask (-p),
 * validates that all three were supplied, then derives the per-lcore port
 * assignments. Terminates the program on any bad/missing option.
 * NOTE(review): the switch statement and case labels around the option
 * handlers (original lines 388-398) are not visible in this excerpt. */
378 parse_args(int argc, char **argv)
381 const char *prgname = argv[0];
383 /* Disable printing messages within getopt() */
386 /* Parse command line */
387 while ((opt = getopt(argc, argv, "i:o:p:")) != EOF) {
390 input_cores_mask = parse_unsigned(optarg);
393 output_cores_mask = parse_unsigned(optarg);
396 ports_mask = parse_unsigned(optarg);
/* Unknown option: show usage, then abort. */
399 print_usage(prgname);
400 FATAL_ERROR("Invalid option specified");
404 /* Check that options were parsed ok (parse_unsigned returns 0 on error,
 * so a zero mask covers both "missing" and "malformed"). */
405 if (input_cores_mask == 0) {
406 print_usage(prgname);
407 FATAL_ERROR("IN_CORES not specified correctly");
409 if (output_cores_mask == 0) {
410 print_usage(prgname);
411 FATAL_ERROR("OUT_CORES not specified correctly");
413 if (ports_mask == 0) {
414 print_usage(prgname);
415 FATAL_ERROR("PORTMASK not specified correctly");
/* Masks are valid: bind each enabled lcore to its port. */
418 setup_port_lcore_affinities();
421 /* Initialise a single port on an Ethernet device.
 *
 * Configures one RX and one TX queue, starts the port and enables
 * promiscuous mode. Any failure is fatal (rte_exit via FATAL_ERROR).
 * NOTE(review): several error-check lines ("if (ret < 0)") and trailing
 * call arguments are not visible in this excerpt. */
423 init_port(uint16_t port)
426 uint16_t nb_rxd = NB_RXD;
427 uint16_t nb_txd = NB_TXD;
428 struct rte_eth_dev_info dev_info;
429 struct rte_eth_rxconf rxq_conf;
430 struct rte_eth_txconf txq_conf;
/* Copy the template config so per-port tweaks don't leak between ports. */
431 struct rte_eth_conf local_port_conf = port_conf;
433 /* Initialise device and RX/TX queues */
434 PRINT_INFO("Initialising port %u ...", port);
436 rte_eth_dev_info_get(port, &dev_info);
/* Enable fast mbuf free only if the device advertises it. */
437 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
438 local_port_conf.txmode.offloads |=
439 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
440 ret = rte_eth_dev_configure(port, 1, 1, &local_port_conf);
442 FATAL_ERROR("Could not configure port%u (%d)", port, ret);
/* Let the driver clamp descriptor counts to what the HW supports. */
444 ret = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
446 FATAL_ERROR("Could not adjust number of descriptors for port%u (%d)",
/* RX queue 0 on the port's own NUMA socket, inheriting port offloads. */
449 rxq_conf = dev_info.default_rxconf;
450 rxq_conf.offloads = local_port_conf.rxmode.offloads;
451 ret = rte_eth_rx_queue_setup(port, 0, nb_rxd,
452 rte_eth_dev_socket_id(port),
456 FATAL_ERROR("Could not setup up RX queue for port%u (%d)",
/* TX queue 0: ignore legacy txq_flags, use the offloads API instead. */
459 txq_conf = dev_info.default_txconf;
460 txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
461 txq_conf.offloads = local_port_conf.txmode.offloads;
462 ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
463 rte_eth_dev_socket_id(port),
466 FATAL_ERROR("Could not setup up TX queue for port%u (%d)",
469 ret = rte_eth_dev_start(port);
471 FATAL_ERROR("Could not start port%u (%d)", port, ret);
/* Receive all traffic, not just frames addressed to this port's MAC. */
473 rte_eth_promiscuous_enable(port);
476 /* Check the link status of all ports in up to 9s, and print them finally.
 * Polls every masked port at CHECK_INTERVAL until all report link-up or the
 * timeout expires; statuses are printed once, on the final pass.
 * NOTE(review): the all_ports_up initialisation, continue statements, and
 * loop/brace closings are among the lines missing from this excerpt. */
478 check_all_ports_link_status(uint32_t port_mask)
480 #define CHECK_INTERVAL 100 /* 100ms */
481 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
483 uint8_t count, all_ports_up, print_flag = 0;
484 struct rte_eth_link link;
486 printf("\nChecking link status");
488 for (count = 0; count <= MAX_CHECK_TIME; count++) {
490 RTE_ETH_FOREACH_DEV(portid) {
491 if ((port_mask & (1 << portid)) == 0)
493 memset(&link, 0, sizeof(link));
/* Non-blocking query; stale data is fine, we poll repeatedly. */
494 rte_eth_link_get_nowait(portid, &link);
495 /* print link status if flag set */
496 if (print_flag == 1) {
497 if (link.link_status)
499 "Port%d Link Up. Speed %u Mbps - %s\n",
500 portid, link.link_speed,
501 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
/* NOTE(review): the stray \n inside the "half-duplex\n" literal yields a
 * double newline in the half-duplex case only -- looks like a bug, but a
 * string literal cannot be changed in a comments-only review pass. */
502 ("full-duplex") : ("half-duplex\n"));
504 printf("Port %d Link Down\n", portid);
507 /* clear all_ports_up flag if any link down */
508 if (link.link_status == ETH_LINK_DOWN) {
513 /* after finally printing all link status, get out */
517 if (all_ports_up == 0) {
/* Not all up yet: wait one interval before the next polling pass. */
520 rte_delay_ms(CHECK_INTERVAL);
523 /* set the print_flag if all ports up or timeout */
524 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
531 /* Initialise ports/queues etc. and start main loop on each core.
 * Order: signal handlers -> EAL init -> app args -> mbuf pool -> port
 * discovery/validation -> per-port init -> link check -> launch main_loop
 * on every lcore and wait for them.
 * NOTE(review): return-statement lines, loop decrement (high_port--),
 * init_port(port) call and several braces are missing from this excerpt. */
533 main(int argc, char** argv)
536 unsigned i,high_port;
537 uint16_t nb_sys_ports, port;
539 /* Associate signal_handler function with USR signals */
540 signal(SIGUSR1, signal_handler);
541 signal(SIGUSR2, signal_handler);
/* Initialise the DPDK Environment Abstraction Layer first; it consumes
 * its own argv prefix (up to "--"). */
544 ret = rte_eal_init(argc, argv);
546 FATAL_ERROR("Could not initialise EAL (%d)", ret);
550 /* Parse application arguments (after the EAL ones) */
551 parse_args(argc, argv);
553 /* Create the mbuf pool */
554 pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
555 MEMPOOL_CACHE_SZ, 0, MBUF_DATA_SZ, rte_socket_id());
556 if (pktmbuf_pool == NULL) {
557 FATAL_ERROR("Could not initialise mbuf pool");
561 /* Get number of ports found in scan */
562 nb_sys_ports = rte_eth_dev_count();
563 if (nb_sys_ports == 0)
564 FATAL_ERROR("No supported Ethernet device found");
565 /* Find highest port set in portmask */
566 for (high_port = (sizeof(ports_mask) * 8) - 1;
567 (high_port != 0) && !(ports_mask & (1 << high_port));
/* NOTE(review): valid port ids are 0..nb_sys_ports-1, so this check
 * seemingly admits high_port == nb_sys_ports -- possible off-by-one;
 * confirm against the full source. */
570 if (high_port > nb_sys_ports)
571 FATAL_ERROR("Port mask requires more ports than available");
573 /* Initialise each port */
574 RTE_ETH_FOREACH_DEV(port) {
575 /* Skip ports that are not enabled */
576 if ((ports_mask & (1 << port)) == 0) {
/* Block until every masked port reports link-up (or the 9s timeout). */
581 check_all_ports_link_status(ports_mask);
583 /* Launch per-lcore function on every lcore */
584 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
/* Wait on each slave lcore; main_loop never returns for active cores. */
585 RTE_LCORE_FOREACH_SLAVE(i) {
586 if (rte_eal_wait_lcore(i) < 0)