 *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <getopt.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/queue.h>
#include <sys/ioctl.h>
#include <netinet/in.h>
#include <linux/if.h>
#include <linux/if_tun.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1
#define FATAL_ERROR(fmt, args...) rte_exit(EXIT_FAILURE, fmt "\n", ##args)
#define PRINT_INFO(fmt, args...) RTE_LOG(INFO, APP, fmt "\n", ##args)

/* NUMA socket to allocate mbuf pool on */
/* NOTE(review): value elided in extraction; 0 matches the upstream example — confirm */
#define SOCKET 0

/* Max ports than can be used (each port is associated with two lcores) */
#define MAX_PORTS (RTE_MAX_LCORE / 2)

/* Max size of a single packet */
#define MAX_PACKET_SZ 2048

/* Number of bytes needed for each mbuf */
#define MBUF_SZ \
    (MAX_PACKET_SZ + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

/* Number of mbufs in mempool that is created */
/* NOTE(review): value elided in extraction; 8192 matches the upstream example — confirm */
#define NB_MBUF 8192

/* How many packets to attempt to read from NIC in one go */
#define PKT_BURST_SZ 32

/* How many objects (mbufs) to keep in per-lcore mempool cache */
#define MEMPOOL_CACHE_SZ PKT_BURST_SZ

/* Number of RX ring descriptors */
/* NOTE(review): value elided in extraction; 128 matches the upstream example — confirm */
#define NB_RXD 128

/* Number of TX ring descriptors */
/* NOTE(review): value elided in extraction; 512 matches the upstream example — confirm */
#define NB_TXD 512
110 * RX and TX Prefetch, Host, and Write-back threshold values should be
111 * carefully set for optimal performance. Consult the network
112 * controller's datasheet and supporting DPDK documentation for guidance
113 * on how these parameters should be set.
115 /* RX ring configuration */
116 static const struct rte_eth_rxconf rx_conf = {
118 .pthresh = 8, /* Ring prefetch threshold */
119 .hthresh = 8, /* Ring host threshold */
120 .wthresh = 4, /* Ring writeback threshold */
122 .rx_free_thresh = 0, /* Immediately free RX descriptors */
126 * These default values are optimized for use with the Intel(R) 82599 10 GbE
127 * Controller and the DPDK ixgbe PMD. Consider using other values for other
128 * network controllers and/or network drivers.
130 /* TX ring configuration */
131 static const struct rte_eth_txconf tx_conf = {
133 .pthresh = 36, /* Ring prefetch threshold */
134 .hthresh = 0, /* Ring host threshold */
135 .wthresh = 0, /* Ring writeback threshold */
137 .tx_free_thresh = 0, /* Use PMD default values */
138 .tx_rs_thresh = 0, /* Use PMD default values */
141 /* Options for configuring ethernet port */
142 static const struct rte_eth_conf port_conf = {
144 .header_split = 0, /* Header Split disabled */
145 .hw_ip_checksum = 0, /* IP checksum offload disabled */
146 .hw_vlan_filter = 0, /* VLAN filtering disabled */
147 .jumbo_frame = 0, /* Jumbo Frame Support disabled */
148 .hw_strip_crc = 0, /* CRC stripped by hardware */
154 /* Mempool for mbufs */
155 static struct rte_mempool * pktmbuf_pool = NULL;
157 /* Mask of enabled ports */
158 static uint32_t ports_mask = 0;
160 /* Mask of cores that read from NIC and write to tap */
161 static uint32_t input_cores_mask = 0;
163 /* Mask of cores that read from tap and write to NIC */
164 static uint32_t output_cores_mask = 0;
166 /* Array storing port_id that is associated with each lcore */
167 static uint8_t port_ids[RTE_MAX_LCORE];
169 /* Structure type for recording lcore-specific stats */
176 /* Array of lcore-specific stats */
177 static struct stats lcore_stats[RTE_MAX_LCORE];
179 /* Print out statistics on packets handled */
185 printf("\n**Exception-Path example application statistics**\n"
186 "======= ====== ============ ============ ===============\n"
187 " Lcore Port RX TX Dropped on TX\n"
188 "------- ------ ------------ ------------ ---------------\n");
189 RTE_LCORE_FOREACH(i) {
190 printf("%6u %7u %13"PRIu64" %13"PRIu64" %16"PRIu64"\n",
191 i, (unsigned)port_ids[i],
192 lcore_stats[i].rx, lcore_stats[i].tx,
193 lcore_stats[i].dropped);
195 printf("======= ====== ============ ============ ===============\n");
198 /* Custom handling of signals to handle stats */
200 signal_handler(int signum)
202 /* When we receive a USR1 signal, print stats */
203 if (signum == SIGUSR1) {
207 /* When we receive a USR2 signal, reset stats */
208 if (signum == SIGUSR2) {
209 memset(&lcore_stats, 0, sizeof(lcore_stats));
210 printf("\n**Statistics have been reset**\n");
/*
 * Create a tap network interface, or use existing one with same name.
 * If name[0]='\0' then a name is automatically assigned and returned in name.
 *
 * Returns the open file descriptor for the tap device on success, or a
 * negative value on failure (open() or TUNSETIFF ioctl() error).
 */
static int tap_create(char *name)
{
    struct ifreq ifr;
    int fd, ret;

    fd = open("/dev/net/tun", O_RDWR);
    if (fd < 0)
        return fd;

    memset(&ifr, 0, sizeof(ifr));

    /* TAP device without packet information */
    ifr.ifr_flags = IFF_TAP | IFF_NO_PI;

    /*
     * FIX: never pass caller-controlled text as a format string — a name
     * containing '%' would be interpreted as conversion specifiers.
     * Copy it through a constant "%s" format instead.
     */
    if (name && *name)
        rte_snprintf(ifr.ifr_name, IFNAMSIZ, "%s", name);

    ret = ioctl(fd, TUNSETIFF, (void *) &ifr);
    if (ret < 0) {
        close(fd);
        return ret;
    }

    /* Return the (possibly kernel-assigned) interface name to the caller */
    if (name)
        rte_snprintf(name, IFNAMSIZ, "%s", ifr.ifr_name);

    return fd;
}
248 /* Main processing loop */
249 static __attribute__((noreturn)) int
250 main_loop(__attribute__((unused)) void *arg)
252 const unsigned lcore_id = rte_lcore_id();
253 char tap_name[IFNAMSIZ];
256 /* Create new tap interface */
257 rte_snprintf(tap_name, IFNAMSIZ, "tap_dpdk_%.2u", lcore_id);
258 tap_fd = tap_create(tap_name);
260 FATAL_ERROR("Could not create tap interface \"%s\" (%d)",
263 if ((1 << lcore_id) & input_cores_mask) {
264 PRINT_INFO("Lcore %u is reading from port %u and writing to %s",
265 lcore_id, (unsigned)port_ids[lcore_id], tap_name);
267 /* Loop forever reading from NIC and writing to tap */
269 struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
271 const unsigned nb_rx =
272 rte_eth_rx_burst(port_ids[lcore_id], 0,
273 pkts_burst, PKT_BURST_SZ);
274 lcore_stats[lcore_id].rx += nb_rx;
275 for (i = 0; likely(i < nb_rx); i++) {
276 struct rte_mbuf *m = pkts_burst[i];
277 /* Ignore return val from write() */
278 int ret = write(tap_fd,
279 rte_pktmbuf_mtod(m, void*),
280 rte_pktmbuf_data_len(m));
282 if (unlikely(ret < 0))
283 lcore_stats[lcore_id].dropped++;
285 lcore_stats[lcore_id].tx++;
289 else if ((1 << lcore_id) & output_cores_mask) {
290 PRINT_INFO("Lcore %u is reading from %s and writing to port %u",
291 lcore_id, tap_name, (unsigned)port_ids[lcore_id]);
293 /* Loop forever reading from tap and writing to NIC */
296 struct rte_mbuf *m = rte_pktmbuf_alloc(pktmbuf_pool);
300 ret = read(tap_fd, m->pkt.data, MAX_PACKET_SZ);
301 lcore_stats[lcore_id].rx++;
302 if (unlikely(ret < 0)) {
303 FATAL_ERROR("Reading from %s interface failed",
308 m->pkt.pkt_len = (uint16_t)ret;
309 m->pkt.data_len = (uint16_t)ret;
310 ret = rte_eth_tx_burst(port_ids[lcore_id], 0, &m, 1);
311 if (unlikely(ret < 1)) {
313 lcore_stats[lcore_id].dropped++;
316 lcore_stats[lcore_id].tx++;
321 PRINT_INFO("Lcore %u has nothing to do", lcore_id);
323 ; /* loop doing nothing */
326 * Tap file is closed automatically when program exits. Putting close()
327 * here will cause the compiler to give an error about unreachable code.
/* Display usage instructions */
static void
print_usage(const char *prgname)
{
    PRINT_INFO("\nUsage: %s [EAL options] -- -p PORTMASK -i IN_CORES -o OUT_CORES\n"
               "    -p PORTMASK: hex bitmask of ports to use\n"
               "    -i IN_CORES: hex bitmask of cores which read from NIC\n"
               "    -o OUT_CORES: hex bitmask of cores which write to NIC",
               prgname);
}
/*
 * Convert a hex string to an unsigned number. 0 is returned if any error
 * occurs: empty input, trailing garbage, or a value that overflows
 * unsigned long (previously an ERANGE overflow was silently truncated to
 * 32 bits instead of being reported as an error).
 */
static uint32_t
parse_unsigned(const char *portmask)
{
    char *end = NULL;
    unsigned long num;

    errno = 0;
    num = strtoul(portmask, &end, 16);
    if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') ||
        (errno == ERANGE) || (num > UINT32_MAX))
        return 0;

    return (uint32_t)num;
}
356 /* Record affinities between ports and lcores in global port_ids[] array */
358 setup_port_lcore_affinities(void)
364 /* Setup port_ids[] array, and check masks were ok */
365 RTE_LCORE_FOREACH(i) {
366 if (input_cores_mask & (1 << i)) {
367 /* Skip ports that are not enabled */
368 while ((ports_mask & (1 << rx_port)) == 0) {
370 if (rx_port > (sizeof(ports_mask) * 8))
371 goto fail; /* not enough ports */
374 port_ids[i] = rx_port++;
376 else if (output_cores_mask & (1 << i)) {
377 /* Skip ports that are not enabled */
378 while ((ports_mask & (1 << tx_port)) == 0) {
380 if (tx_port > (sizeof(ports_mask) * 8))
381 goto fail; /* not enough ports */
384 port_ids[i] = tx_port++;
388 if (rx_port != tx_port)
389 goto fail; /* uneven number of cores in masks */
391 if (ports_mask & (~((1 << rx_port) - 1)))
392 goto fail; /* unused ports */
396 FATAL_ERROR("Invalid core/port masks specified on command line");
399 /* Parse the arguments given in the command line of the application */
401 parse_args(int argc, char **argv)
404 const char *prgname = argv[0];
406 /* Disable printing messages within getopt() */
409 /* Parse command line */
410 while ((opt = getopt(argc, argv, "i:o:p:")) != EOF) {
413 input_cores_mask = parse_unsigned(optarg);
416 output_cores_mask = parse_unsigned(optarg);
419 ports_mask = parse_unsigned(optarg);
422 print_usage(prgname);
423 FATAL_ERROR("Invalid option specified");
427 /* Check that options were parsed ok */
428 if (input_cores_mask == 0) {
429 print_usage(prgname);
430 FATAL_ERROR("IN_CORES not specified correctly");
432 if (output_cores_mask == 0) {
433 print_usage(prgname);
434 FATAL_ERROR("OUT_CORES not specified correctly");
436 if (ports_mask == 0) {
437 print_usage(prgname);
438 FATAL_ERROR("PORTMASK not specified correctly");
441 setup_port_lcore_affinities();
444 /* Initialise a single port on an Ethernet device */
446 init_port(uint8_t port)
448 struct rte_eth_link link;
451 /* Initialise device and RX/TX queues */
452 PRINT_INFO("Initialising port %u ...", (unsigned)port);
454 ret = rte_eth_dev_configure(port, 1, 1, &port_conf);
456 FATAL_ERROR("Could not configure port%u (%d)",
457 (unsigned)port, ret);
459 ret = rte_eth_rx_queue_setup(port, 0, NB_RXD, SOCKET, &rx_conf,
462 FATAL_ERROR("Could not setup up RX queue for port%u (%d)",
463 (unsigned)port, ret);
465 ret = rte_eth_tx_queue_setup(port, 0, NB_TXD, SOCKET, &tx_conf);
467 FATAL_ERROR("Could not setup up TX queue for port%u (%d)",
468 (unsigned)port, ret);
470 ret = rte_eth_dev_start(port);
472 FATAL_ERROR("Could not start port%u (%d)", (unsigned)port, ret);
474 /* Everything is setup and started, print link status */
475 rte_eth_link_get(port, &link);
476 if (link.link_status)
477 PRINT_INFO(" link up - %u Mbit/s - %s",
478 (unsigned)link.link_speed,
479 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
480 ("full-duplex") : ("half-duplex"));
482 PRINT_INFO(" link down");
484 rte_eth_promiscuous_enable(port);
487 /* Initialise ports/queues etc. and start main loop on each core */
489 main(int argc, char** argv)
492 unsigned i,high_port;
493 uint8_t nb_sys_ports, port;
495 /* Associate signal_hanlder function with USR signals */
496 signal(SIGUSR1, signal_handler);
497 signal(SIGUSR2, signal_handler);
500 ret = rte_eal_init(argc, argv);
502 FATAL_ERROR("Could not initialise EAL (%d)", ret);
506 /* Parse application arguments (after the EAL ones) */
507 parse_args(argc, argv);
509 /* Create the mbuf pool */
510 pktmbuf_pool = rte_mempool_create("mbuf_pool", NB_MBUF, MBUF_SZ,
512 sizeof(struct rte_pktmbuf_pool_private),
513 rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
515 if (pktmbuf_pool == NULL) {
516 FATAL_ERROR("Could not initialise mbuf pool");
520 /* Initialise PMD driver(s) */
521 ret = rte_pmd_init_all();
523 FATAL_ERROR("Could not probe PMD (%d)", ret);
525 /* Scan PCI bus for recognised devices */
526 ret = rte_eal_pci_probe();
528 FATAL_ERROR("Could not probe PCI (%d)", ret);
530 /* Get number of ports found in scan */
531 nb_sys_ports = rte_eth_dev_count();
532 if (nb_sys_ports == 0)
533 FATAL_ERROR("No supported Ethernet devices found - check that "
534 "CONFIG_RTE_LIBRTE_IGB_PMD=y and/or "
535 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in the config file");
536 /* Find highest port set in portmask */
537 for (high_port = (sizeof(ports_mask) * 8) - 1;
538 (high_port != 0) && !(ports_mask & (1 << high_port));
541 if (high_port > nb_sys_ports)
542 FATAL_ERROR("Port mask requires more ports than available");
544 /* Initialise each port */
545 for (port = 0; port < nb_sys_ports; port++) {
546 /* Skip ports that are not enabled */
547 if ((ports_mask & (1 << port)) == 0) {
553 /* Launch per-lcore function on every lcore */
554 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
555 RTE_LCORE_FOREACH_SLAVE(i) {
556 if (rte_eal_wait_lcore(i) < 0)