4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #include <sys/queue.h>
44 #include <netinet/in.h>
46 #ifdef RTE_EXEC_ENV_LINUXAPP
47 #include <linux/if_tun.h>
50 #include <sys/ioctl.h>
54 #include <rte_common.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
60 #include <rte_per_lcore.h>
61 #include <rte_launch.h>
62 #include <rte_atomic.h>
63 #include <rte_lcore.h>
64 #include <rte_branch_prediction.h>
65 #include <rte_interrupts.h>
67 #include <rte_debug.h>
68 #include <rte_ether.h>
69 #include <rte_ethdev.h>
70 #include <rte_mempool.h>
72 #include <rte_string_fns.h>
73 #include <rte_cycles.h>
/*
 * Application-wide constants and logging helpers.
 * NOTE(review): this listing is incomplete -- the #define lines for
 * NB_MBUF, NB_RXD and NB_TXD (referenced later in main()/init_port())
 * are not visible here; confirm against the full file.
 */
75 /* Macros for printing using RTE_LOG */
76 #define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1
/* FATAL_ERROR terminates the process via rte_exit(); a newline is
 * appended to fmt by string-literal concatenation. */
77 #define FATAL_ERROR(fmt, args...) rte_exit(EXIT_FAILURE, fmt "\n", ##args)
78 #define PRINT_INFO(fmt, args...) RTE_LOG(INFO, APP, fmt "\n", ##args)
80 /* Max ports than can be used (each port is associated with two lcores) */
81 #define MAX_PORTS (RTE_MAX_LCORE / 2)
83 /* Max size of a single packet */
84 #define MAX_PACKET_SZ (2048)
86 /* Size of the data buffer in each mbuf */
87 #define MBUF_DATA_SZ (MAX_PACKET_SZ + RTE_PKTMBUF_HEADROOM)
89 /* Number of mbufs in mempool that is created */
92 /* How many packets to attempt to read from NIC in one go */
93 #define PKT_BURST_SZ 32
95 /* How many objects (mbufs) to keep in per-lcore mempool cache */
96 #define MEMPOOL_CACHE_SZ PKT_BURST_SZ
98 /* Number of RX ring descriptors */
101 /* Number of TX ring descriptors */
105 * RX and TX Prefetch, Host, and Write-back threshold values should be
106 * carefully set for optimal performance. Consult the network
107 * controller's datasheet and supporting DPDK documentation for guidance
108 * on how these parameters should be set.
/*
 * Static port configuration shared by all ports: all RX offloads off,
 * HW CRC strip on, single (non-multiqueue) TX mode.
 * NOTE(review): the initializer is incomplete in this listing -- the
 * enclosing .rxmode = { ... } / .txmode = { ... } lines and closing
 * braces are missing; confirm structure against the full file.
 */
111 /* Options for configuring ethernet port */
112 static const struct rte_eth_conf port_conf = {
114 .header_split = 0, /* Header Split disabled */
115 .hw_ip_checksum = 0, /* IP checksum offload disabled */
116 .hw_vlan_filter = 0, /* VLAN filtering disabled */
117 .jumbo_frame = 0, /* Jumbo Frame Support disabled */
118 .hw_strip_crc = 1, /* CRC stripped by hardware */
121 .mq_mode = ETH_MQ_TX_NONE,
/*
 * File-scope state. The three masks are filled in by parse_args();
 * port_ids[] is filled in by setup_port_lcore_affinities() so each
 * lcore knows which port it services in main_loop().
 */
125 /* Mempool for mbufs */
126 static struct rte_mempool * pktmbuf_pool = NULL;
128 /* Mask of enabled ports */
129 static uint32_t ports_mask = 0;
/* 64-bit masks: one bit per lcore (RTE_MAX_LCORE assumed <= 64 here --
 * TODO confirm; the (i & 0x3f) masking later suggests this). */
131 /* Mask of cores that read from NIC and write to tap */
132 static uint64_t input_cores_mask = 0;
134 /* Mask of cores that read from tap and write to NIC */
135 static uint64_t output_cores_mask = 0;
137 /* Array storing port_id that is associated with each lcore */
138 static uint16_t port_ids[RTE_MAX_LCORE];
/* NOTE(review): the body of struct stats (rx/tx/dropped counters, per
 * the uses below) is missing from this listing. */
140 /* Structure type for recording lcore-specific stats */
147 /* Array of lcore-specific stats */
148 static struct stats lcore_stats[RTE_MAX_LCORE];
/*
 * Dump the per-lcore rx/tx/dropped counters as a formatted table.
 * Triggered from signal_handler() on SIGUSR1 (per the comment there).
 * NOTE(review): the function signature and enclosing braces are not
 * visible in this listing.
 */
150 /* Print out statistics on packets handled */
156 printf("\n**Exception-Path example application statistics**\n"
157 "======= ====== ============ ============ ===============\n"
158 " Lcore Port RX TX Dropped on TX\n"
159 "------- ------ ------------ ------------ ---------------\n");
/* Iterate every enabled lcore; counters are plain (non-atomic) uint64_t
 * per the PRIu64 format -- a concurrent read here may be torn, which is
 * acceptable for a diagnostics printout. */
160 RTE_LCORE_FOREACH(i) {
161 printf("%6u %7u %13"PRIu64" %13"PRIu64" %16"PRIu64"\n",
162 i, (unsigned)port_ids[i],
163 lcore_stats[i].rx, lcore_stats[i].tx,
164 lcore_stats[i].dropped);
166 printf("======= ====== ============ ============ ===============\n");
/*
 * Signal handler: SIGUSR1 prints the stats table, SIGUSR2 zeroes it.
 * NOTE(review): printf/memset are not async-signal-safe (POSIX); this
 * is tolerated in the example but worth flagging. The print_stats()
 * call and closing braces are not visible in this listing.
 */
169 /* Custom handling of signals to handle stats */
171 signal_handler(int signum)
173 /* When we receive a USR1 signal, print stats */
174 if (signum == SIGUSR1) {
178 /* When we receive a USR2 signal, reset stats */
179 if (signum == SIGUSR2) {
180 memset(&lcore_stats, 0, sizeof(lcore_stats));
181 printf("\n**Statistics have been reset**\n");
186 #ifdef RTE_EXEC_ENV_LINUXAPP
/*
 * Linux implementation: open /dev/net/tun and attach to a TAP device
 * via the TUNSETIFF ioctl. Returns the open fd on success (per the
 * callers, which treat the return as a file descriptor); error-path
 * lines are missing from this listing -- presumably a negative value
 * is returned, TODO confirm.
 */
188 * Create a tap network interface, or use existing one with same name.
189 * If name[0]='\0' then a name is automatically assigned and returned in name.
191 static int tap_create(char *name)
196 fd = open("/dev/net/tun", O_RDWR);
200 memset(&ifr, 0, sizeof(ifr));
202 /* TAP device without packet information */
/* IFF_NO_PI: reads/writes on the fd carry the raw Ethernet frame only,
 * matching the mbuf payload copied in main_loop(). */
203 ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
206 snprintf(ifr.ifr_name, IFNAMSIZ, "%s", name);
208 ret = ioctl(fd, TUNSETIFF, (void *) &ifr);
/* Copy back the kernel-assigned name (used when name was empty). */
215 snprintf(name, IFNAMSIZ, "%s", ifr.ifr_name);
/*
 * Non-Linux (BSD) fallback: probe /dev/tap0../dev/tap254 until one
 * opens (or fails with something other than EBUSY), then report the
 * chosen name. Returns the open fd per the Linux variant's contract --
 * TODO confirm the missing error-path lines.
 */
221 * Find a free tap network interface, or create a new one.
222 * The name is automatically assigned and returned in name.
224 static int tap_create(char *name)
227 char devname[PATH_MAX];
229 for (i = 0; i < 255; i++) {
230 snprintf(devname, sizeof(devname), "/dev/tap%d", i);
231 fd = open(devname, O_RDWR);
/* EBUSY means this tap is taken -- keep scanning; any other outcome
 * (success or a different errno) stops the search. */
232 if (fd >= 0 || errno != EBUSY)
237 snprintf(name, IFNAMSIZ, "tap%d", i);
/*
 * Per-lcore worker, launched on every lcore from main().
 * Role is chosen by which mask the lcore's bit is set in:
 *   input_cores_mask  -> burst-read from the NIC port bound to this
 *                        lcore and write() each frame to a tap device;
 *   output_cores_mask -> read() frames from the tap device and
 *                        tx-burst them to the NIC port;
 *   neither           -> idle (prints a notice and presumably returns).
 * Both loops run forever; the enclosing for(;;) lines are missing from
 * this listing.
 */
243 /* Main processing loop */
245 main_loop(__attribute__((unused)) void *arg)
247 const unsigned lcore_id = rte_lcore_id();
248 char tap_name[IFNAMSIZ];
251 if ((1ULL << lcore_id) & input_cores_mask) {
252 /* Create new tap interface */
/* Tap name is derived from the lcore id, so each input lcore gets its
 * own tap device (tap_dpdk_00, tap_dpdk_01, ...). */
253 snprintf(tap_name, IFNAMSIZ, "tap_dpdk_%.2u", lcore_id);
254 tap_fd = tap_create(tap_name);
256 FATAL_ERROR("Could not create tap interface \"%s\" (%d)",
259 PRINT_INFO("Lcore %u is reading from port %u and writing to %s",
260 lcore_id, (unsigned)port_ids[lcore_id], tap_name);
262 /* Loop forever reading from NIC and writing to tap */
264 struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
266 const unsigned nb_rx =
267 rte_eth_rx_burst(port_ids[lcore_id], 0,
268 pkts_burst, PKT_BURST_SZ);
269 lcore_stats[lcore_id].rx += nb_rx;
270 for (i = 0; likely(i < nb_rx); i++) {
271 struct rte_mbuf *m = pkts_burst[i];
272 /* Ignore return val from write() */
273 int ret = write(tap_fd,
274 rte_pktmbuf_mtod(m, void*),
275 rte_pktmbuf_data_len(m));
/* NOTE(review): only a dropped++ on failure and a tx++ are visible
 * here; whether tx++ sits in an else branch (i.e. whether a failed
 * write is double-counted) cannot be determined from this listing --
 * confirm, and confirm the rte_pktmbuf_free(m) that must follow. */
277 if (unlikely(ret < 0))
278 lcore_stats[lcore_id].dropped++;
280 lcore_stats[lcore_id].tx++;
284 else if ((1ULL << lcore_id) & output_cores_mask) {
285 /* Create new tap interface */
286 snprintf(tap_name, IFNAMSIZ, "tap_dpdk_%.2u", lcore_id);
287 tap_fd = tap_create(tap_name);
289 FATAL_ERROR("Could not create tap interface \"%s\" (%d)",
292 PRINT_INFO("Lcore %u is reading from %s and writing to port %u",
293 lcore_id, tap_name, (unsigned)port_ids[lcore_id]);
295 /* Loop forever reading from tap and writing to NIC */
298 struct rte_mbuf *m = rte_pktmbuf_alloc(pktmbuf_pool);
/* Blocking read of one full frame from the tap (IFF_NO_PI: raw frame,
 * no metadata header). The length argument line is missing here. */
302 ret = read(tap_fd, rte_pktmbuf_mtod(m, void *),
304 lcore_stats[lcore_id].rx++;
305 if (unlikely(ret < 0)) {
306 FATAL_ERROR("Reading from %s interface failed",
/* One read == one frame, so pkt_len and data_len are both the number
 * of bytes read. */
311 m->pkt_len = (uint16_t)ret;
312 m->data_len = (uint16_t)ret;
313 ret = rte_eth_tx_burst(port_ids[lcore_id], 0, &m, 1);
/* tx_burst returning 0 means the mbuf was not sent and is still owned
 * by us; the free() in the drop path is among the missing lines. */
314 if (unlikely(ret < 1)) {
316 lcore_stats[lcore_id].dropped++;
319 lcore_stats[lcore_id].tx++;
324 PRINT_INFO("Lcore %u has nothing to do", lcore_id);
328 * Tap file is closed automatically when program exits. Putting close()
329 * here will cause the compiler to give an error about unreachable code.
/*
 * Print command-line help for the application-specific (post-"--")
 * options. prgname is argv[0], interpolated into the final missing
 * argument line of this PRINT_INFO call.
 */
333 /* Display usage instructions */
335 print_usage(const char *prgname)
337 PRINT_INFO("\nUsage: %s [EAL options] -- -p PORTMASK -i IN_CORES -o OUT_CORES\n"
338 " -p PORTMASK: hex bitmask of ports to use\n"
339 " -i IN_CORES: hex bitmask of cores which read from NIC\n"
340 " -o OUT_CORES: hex bitmask of cores which write to NIC",
/*
 * Parse a hex bitmask string into a uint64_t.
 * Returns 0 on any parse error (empty string or trailing junk), which
 * callers treat as "option not specified correctly". Note 0 is also a
 * legal-looking mask, so an explicit "0" is indistinguishable from an
 * error -- acceptable here since an empty mask is rejected anyway.
 */
344 /* Convert string to unsigned number. 0 is returned if error occurs */
346 parse_unsigned(const char *portmask)
351 num = strtoull(portmask, &end, 16);
352 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
355 return (uint64_t)num;
/*
 * Walk the enabled lcores in order, assigning the next enabled port
 * (from ports_mask) to each input lcore and, independently, to each
 * output lcore. The two walks must consume the same number of ports
 * (rx_port == tx_port at the end) and every bit of ports_mask must be
 * consumed, otherwise the masks are inconsistent and we abort.
 */
358 /* Record affinities between ports and lcores in global port_ids[] array */
360 setup_port_lcore_affinities(void)
363 uint16_t tx_port = 0;
364 uint16_t rx_port = 0;
366 /* Setup port_ids[] array, and check masks were ok */
367 RTE_LCORE_FOREACH(i) {
368 if (input_cores_mask & (1ULL << i)) {
369 /* Skip ports that are not enabled */
370 while ((ports_mask & (1 << rx_port)) == 0) {
/* Guard: walked past the width of ports_mask without finding a bit. */
372 if (rx_port > (sizeof(ports_mask) * 8))
373 goto fail; /* not enough ports */
376 port_ids[i] = rx_port++;
/* NOTE(review): this branch masks i with 0x3f but the input branch
 * above does not -- asymmetric shift guarding; confirm against the
 * full file whether both should use (i & 0x3f). */
377 } else if (output_cores_mask & (1ULL << (i & 0x3f))) {
378 /* Skip ports that are not enabled */
379 while ((ports_mask & (1 << tx_port)) == 0) {
381 if (tx_port > (sizeof(ports_mask) * 8))
382 goto fail; /* not enough ports */
385 port_ids[i] = tx_port++;
389 if (rx_port != tx_port)
390 goto fail; /* uneven number of cores in masks */
/* Any ports_mask bit at or above the consumed count means the user
 * enabled more ports than there are core pairs to service them. */
392 if (ports_mask & (~((1 << rx_port) - 1)))
393 goto fail; /* unused ports */
397 FATAL_ERROR("Invalid core/port masks specified on command line");
/*
 * Parse the application arguments that follow the EAL "--" separator:
 *   -i IN_CORES, -o OUT_CORES, -p PORTMASK (all hex bitmasks).
 * All three are mandatory (a zero/unparsable mask aborts), and the
 * port/lcore affinities are derived at the end.
 */
400 /* Parse the arguments given in the command line of the application */
402 parse_args(int argc, char **argv)
405 const char *prgname = argv[0];
407 /* Disable printing messages within getopt() */
410 /* Parse command line */
411 while ((opt = getopt(argc, argv, "i:o:p:")) != EOF) {
414 input_cores_mask = parse_unsigned(optarg);
417 output_cores_mask = parse_unsigned(optarg);
420 ports_mask = parse_unsigned(optarg);
423 print_usage(prgname);
424 FATAL_ERROR("Invalid option specified");
428 /* Check that options were parsed ok */
/* parse_unsigned() returns 0 for both "missing" and "unparsable", so a
 * zero mask covers either failure mode. */
429 if (input_cores_mask == 0) {
430 print_usage(prgname);
431 FATAL_ERROR("IN_CORES not specified correctly");
433 if (output_cores_mask == 0) {
434 print_usage(prgname);
435 FATAL_ERROR("OUT_CORES not specified correctly");
437 if (ports_mask == 0) {
438 print_usage(prgname);
439 FATAL_ERROR("PORTMASK not specified correctly");
442 setup_port_lcore_affinities();
/*
 * Bring up one Ethernet port with a single RX and single TX queue,
 * descriptor counts adjusted to the device's limits, queues allocated
 * on the port's NUMA socket, then start the port and enable
 * promiscuous mode. Any failure is fatal (example-app policy).
 */
445 /* Initialise a single port on an Ethernet device */
447 init_port(uint16_t port)
450 uint16_t nb_rxd = NB_RXD;
451 uint16_t nb_txd = NB_TXD;
453 /* Initialise device and RX/TX queues */
454 PRINT_INFO("Initialising port %u ...", port);
456 ret = rte_eth_dev_configure(port, 1, 1, &port_conf);
458 FATAL_ERROR("Could not configure port%u (%d)", port, ret);
/* May shrink/grow nb_rxd/nb_txd in place to the device's legal range. */
460 ret = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
462 FATAL_ERROR("Could not adjust number of descriptors for port%u (%d)",
465 ret = rte_eth_rx_queue_setup(port, 0, nb_rxd,
466 rte_eth_dev_socket_id(port),
470 FATAL_ERROR("Could not setup up RX queue for port%u (%d)",
473 ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
474 rte_eth_dev_socket_id(port),
477 FATAL_ERROR("Could not setup up TX queue for port%u (%d)",
480 ret = rte_eth_dev_start(port);
482 FATAL_ERROR("Could not start port%u (%d)", port, ret);
/* Promiscuous so the exception path sees all traffic on the wire. */
484 rte_eth_promiscuous_enable(port);
/*
 * Poll link status of all masked ports every 100ms for up to 9s.
 * Prints a progress indicator while waiting, then one status line per
 * port once every link is up (or the timeout expires).
 */
487 /* Check the link status of all ports in up to 9s, and print them finally */
489 check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
491 #define CHECK_INTERVAL 100 /* 100ms */
492 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
494 uint8_t count, all_ports_up, print_flag = 0;
495 struct rte_eth_link link;
497 printf("\nChecking link status");
499 for (count = 0; count <= MAX_CHECK_TIME; count++) {
501 for (portid = 0; portid < port_num; portid++) {
502 if ((port_mask & (1 << portid)) == 0)
504 memset(&link, 0, sizeof(link));
/* _nowait: query without blocking for autonegotiation to finish. */
505 rte_eth_link_get_nowait(portid, &link);
506 /* print link status if flag set */
507 if (print_flag == 1) {
508 if (link.link_status)
510 "Port%d Link Up. Speed %u Mbps - %s\n",
511 portid, link.link_speed,
512 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
/* NOTE(review): "half-duplex\n" carries a stray embedded newline that
 * the "full-duplex" arm does not -- cosmetic output bug, left
 * untouched here since this edit only adds comments. */
513 ("full-duplex") : ("half-duplex\n"));
515 printf("Port %d Link Down\n", portid);
518 /* clear all_ports_up flag if any link down */
519 if (link.link_status == ETH_LINK_DOWN) {
524 /* after finally printing all link status, get out */
528 if (all_ports_up == 0) {
531 rte_delay_ms(CHECK_INTERVAL);
534 /* set the print_flag if all ports up or timeout */
/* Next iteration prints the final per-port status lines. */
535 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Entry point: install stats signal handlers, init EAL, parse app
 * args, create the mbuf pool, validate the port mask against the
 * detected ports, init/start each enabled port, then launch
 * main_loop() on every lcore and wait for the slaves.
 */
542 /* Initialise ports/queues etc. and start main loop on each core */
544 main(int argc, char** argv)
547 unsigned i,high_port;
548 uint16_t nb_sys_ports, port;
550 /* Associate signal_hanlder function with USR signals */
551 signal(SIGUSR1, signal_handler);
552 signal(SIGUSR2, signal_handler);
/* EAL consumes its own argv entries up to "--". */
555 ret = rte_eal_init(argc, argv);
557 FATAL_ERROR("Could not initialise EAL (%d)", ret);
561 /* Parse application arguments (after the EAL ones) */
562 parse_args(argc, argv);
564 /* Create the mbuf pool */
565 pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
566 MEMPOOL_CACHE_SZ, 0, MBUF_DATA_SZ, rte_socket_id());
567 if (pktmbuf_pool == NULL) {
568 FATAL_ERROR("Could not initialise mbuf pool");
572 /* Get number of ports found in scan */
573 nb_sys_ports = rte_eth_dev_count();
574 if (nb_sys_ports == 0)
575 FATAL_ERROR("No supported Ethernet device found");
576 /* Find highest port set in portmask */
/* Scan down from the top bit of ports_mask; empty loop body (the ';'
 * line is among the missing lines). */
577 for (high_port = (sizeof(ports_mask) * 8) - 1;
578 (high_port != 0) && !(ports_mask & (1 << high_port));
/* NOTE(review): ports are 0-indexed, so the highest valid index is
 * nb_sys_ports - 1; '>' here looks like it should be '>=' -- confirm
 * against upstream before changing (doc-only edit, left as-is). */
581 if (high_port > nb_sys_ports)
582 FATAL_ERROR("Port mask requires more ports than available");
584 /* Initialise each port */
585 for (port = 0; port < nb_sys_ports; port++) {
586 /* Skip ports that are not enabled */
587 if ((ports_mask & (1 << port)) == 0) {
592 check_all_ports_link_status(nb_sys_ports, ports_mask);
594 /* Launch per-lcore function on every lcore */
/* CALL_MASTER: the master lcore also runs main_loop() before waiting. */
595 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
596 RTE_LCORE_FOREACH_SLAVE(i) {
597 if (rte_eal_wait_lcore(i) < 0)