/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <getopt.h>

#include <sys/types.h>
#include <sys/param.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_lpm.h>

#include "rte_ipv4_frag.h"

#include "main.h"
#define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1

#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

/* allow max jumbo frame 9.5 KB */
#define JUMBO_FRAME_MAX_SIZE 0x2600

#define ROUNDUP_DIV(a, b) (((a) + (b) - 1) / (b))

/*
 * Max number of fragments per packet expected.
 */
#define MAX_PACKET_FRAG ROUNDUP_DIV(JUMBO_FRAME_MAX_SIZE, IPV4_DEFAULT_PAYLOAD)

#define NB_MBUF 8192
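/*
 * Sizing note: with IPV4_DEFAULT_PAYLOAD from rte_ipv4_frag.h at its usual
 * value of 1480 bytes (a 1500-byte MTU minus the 20-byte IPv4 header),
 * MAX_PACKET_FRAG works out to ROUNDUP_DIV(9728, 1480) = 7 fragments for a
 * maximum-size jumbo frame.
 */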
/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */

/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/* ethernet addresses of ports */
static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
static struct ether_addr remote_eth_addr =
	{{0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff}};

/* mask of enabled ports */
static int enabled_port_mask = 0;

static int rx_queue_per_lcore = 1;

#define MBUF_TABLE_SIZE (2 * MAX(MAX_PKT_BURST, MAX_PACKET_FRAG))

struct mbuf_table {
	uint16_t len;
	struct rte_mbuf *m_table[MBUF_TABLE_SIZE];
};

#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
struct lcore_queue_conf {
	uint16_t n_rx_queue;
	uint8_t rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
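/*
 * Why MBUF_TABLE_SIZE is 2 * MAX(MAX_PKT_BURST, MAX_PACKET_FRAG): a burst is
 * flushed once its length reaches MAX_PKT_BURST, but the packet that crosses
 * that threshold may first expand into up to MAX_PACKET_FRAG fragments, so
 * the table needs headroom beyond MAX_PKT_BURST itself. Doubling the larger
 * of the two bounds keeps the index arithmetic in l3fwd_simple_forward()
 * safely in range.
 */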
static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 1, /**< Jumbo Frame Support enabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};
static const struct rte_eth_rxconf rx_conf = {
	.rx_thresh = {
		.pthresh = RX_PTHRESH,
		.hthresh = RX_HTHRESH,
		.wthresh = RX_WTHRESH,
	},
};

static const struct rte_eth_txconf tx_conf = {
	.tx_thresh = {
		.pthresh = TX_PTHRESH,
		.hthresh = TX_HTHRESH,
		.wthresh = TX_WTHRESH,
	},
	.tx_free_thresh = 0, /* Use PMD default values */
	.tx_rs_thresh = 0,   /* Use PMD default values */
};
struct rte_mempool *pool_direct = NULL, *pool_indirect = NULL;
struct l3fwd_route {
	uint32_t ip;
	uint8_t  depth;
	uint8_t  if_out;
};

struct l3fwd_route l3fwd_route_array[] = {
	{IPv4(100,10,0,0), 16, 2},
	{IPv4(100,20,0,0), 16, 2},
	{IPv4(100,30,0,0), 16, 0},
	{IPv4(100,40,0,0), 16, 0},
};

#define L3FWD_NUM_ROUTES \
	(sizeof(l3fwd_route_array) / sizeof(l3fwd_route_array[0]))

#define L3FWD_LPM_MAX_RULES 1024

struct rte_lpm *l3fwd_lpm = NULL;
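/*
 * Each entry maps a /16 prefix to an output port: for example, a packet
 * destined to 100.10.1.1 longest-prefix-matches 100.10.0.0/16 and is
 * forwarded out port 2, while 100.30.x.x and 100.40.x.x traffic leaves
 * through port 0. Destinations with no matching prefix are bounced back
 * out their input port (see l3fwd_simple_forward()).
 */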
/* Send burst of packets on an output interface */
static inline int
send_burst(struct lcore_queue_conf *qconf, uint16_t n, uint8_t port)
{
	struct rte_mbuf **m_table;
	uint16_t ret, queueid;

	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
	ret = rte_eth_tx_burst(port, queueid, m_table, n);
	if (unlikely(ret < n)) {
		do {
			rte_pktmbuf_free(m_table[ret]);
		} while (++ret < n);
	}
	return 0;
}
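/*
 * Note that rte_eth_tx_burst() may accept fewer than n packets when the TX
 * ring is full; this example simply frees (drops) the unsent mbufs rather
 * than retrying, which keeps the fast path simple at the cost of drops
 * under TX pressure.
 */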
static inline void
l3fwd_simple_forward(struct rte_mbuf *m, uint8_t port_in)
{
	struct lcore_queue_conf *qconf;
	struct ipv4_hdr *ip_hdr;
	uint32_t i, len, lcore_id, ip_dst;
	uint8_t next_hop, port_out;
	int32_t len2;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	/* Remove the Ethernet header and trailer from the input packet */
	rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr));

	/* Read the lookup key (i.e. ip_dst) from the input packet */
	ip_hdr = rte_pktmbuf_mtod(m, struct ipv4_hdr *);
	ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);

	/* Find destination port; fall back to the input port on lookup miss */
	if (rte_lpm_lookup(l3fwd_lpm, ip_dst, &next_hop) == 0 &&
			(enabled_port_mask & 1 << next_hop) != 0)
		port_out = next_hop;
	else
		port_out = port_in;
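	/*
	 * The fragmentation path below depends on the two mempools created in
	 * MAIN(): rte_ipv4_fragmentation() builds every output fragment from a
	 * direct mbuf (holding the fragment's new IPv4 header) plus an
	 * indirect mbuf that only references the original payload, so payload
	 * bytes are never copied. This follows the rte_ipv4_frag.h API of this
	 * DPDK release; later releases renamed it rte_ipv4_fragment_packet().
	 */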
	/* Build transmission burst */
	len = qconf->tx_mbufs[port_out].len;

	/* if we don't need to do any fragmentation */
	if (likely(IPV4_MTU_DEFAULT >= m->pkt.pkt_len)) {
		qconf->tx_mbufs[port_out].m_table[len] = m;
		len2 = 1;
	} else {
		len2 = rte_ipv4_fragmentation(m,
			&qconf->tx_mbufs[port_out].m_table[len],
			(uint16_t)(MBUF_TABLE_SIZE - len),
			IPV4_MTU_DEFAULT,
			pool_direct, pool_indirect);

		/* Free input packet */
		rte_pktmbuf_free(m);

		/* If we fail to fragment the packet */
		if (unlikely(len2 < 0))
			return;
	}

	/* Prepend a new Ethernet header to each outgoing frame */
	for (i = len; i < len + len2; i++) {
		m = qconf->tx_mbufs[port_out].m_table[i];
		struct ether_hdr *eth_hdr = (struct ether_hdr *)
			rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct ether_hdr));
		if (eth_hdr == NULL) {
			rte_panic("No headroom in mbuf.\n");
		}

		m->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);

		ether_addr_copy(&remote_eth_addr, &eth_hdr->d_addr);
		ether_addr_copy(&ports_eth_addr[port_out], &eth_hdr->s_addr);
		eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	}

	len += len2;

	if (likely(len < MAX_PKT_BURST)) {
		qconf->tx_mbufs[port_out].len = (uint16_t)len;
		return;
	}

	/* Transmit packets */
	send_burst(qconf, (uint16_t)len, port_out);
	qconf->tx_mbufs[port_out].len = 0;
}
/* main processing loop */
static int
main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int i, j, nb_rx;
	uint8_t portid;
	struct lcore_queue_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
		US_PER_S * BURST_TX_DRAIN_US;

	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {
		portid = qconf->rx_queue_list[i];
		RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%d\n", lcore_id,
			(int) portid);
	}
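	/*
	 * The drain_tsc constant above converts BURST_TX_DRAIN_US into TSC
	 * cycles: rte_get_tsc_hz() / US_PER_S is cycles per microsecond
	 * (rounded up), so on a hypothetical 2 GHz core it comes to
	 * 2000 * 100 = 200,000 cycles, i.e. a forced TX flush roughly every
	 * 100 us.
	 */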
	while (1) {

		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			/*
			 * This could be optimized (use queueid instead of
			 * portid), but it is not called so often
			 */
			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				send_burst(&lcore_queue_conf[lcore_id],
					   qconf->tx_mbufs[portid].len,
					   portid);
				qconf->tx_mbufs[portid].len = 0;
			}
			prev_tsc = cur_tsc;
		}
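		/*
		 * The RX stage below is software-pipelined in three steps:
		 * prime the cache by prefetching the first PREFETCH_OFFSET
		 * packet headers, then for each forwarded packet prefetch the
		 * one PREFETCH_OFFSET slots ahead, and finally drain the tail
		 * packets whose prefetches were already issued. This hides
		 * memory latency behind the forwarding work.
		 */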
		/*
		 * Read packets from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; i++) {

			portid = qconf->rx_queue_list[i];
			nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
						 MAX_PKT_BURST);

			/* Prefetch first packets */
			for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
				rte_prefetch0(rte_pktmbuf_mtod(
						pkts_burst[j], void *));
			}

			/* Prefetch and forward already prefetched packets */
			for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
				rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
						j + PREFETCH_OFFSET], void *));
				l3fwd_simple_forward(pkts_burst[j], portid);
			}

			/* Forward remaining prefetched packets */
			for (; j < nb_rx; j++)
				l3fwd_simple_forward(pkts_burst[j], portid);
		}
	}
}
/* display usage */
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
	       "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
	       "  -q NQ: number of queues (=ports) per lcore (default is 1)\n",
	       prgname);
}
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (pm == 0)
		return -1;
	return pm;
}
static int
parse_nqueue(const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	/* parse decimal string */
	n = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n == 0)
		return -1;
	if (n >= MAX_RX_QUEUE_PER_LCORE)
		return -1;
	return n;
}
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:q:",
				  lgopts, &option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask < 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		/* nqueue */
		case 'q':
			rx_queue_per_lcore = parse_nqueue(optarg);
			if (rx_queue_per_lcore < 0) {
				printf("invalid queue number\n");
				print_usage(prgname);
				return -1;
			}
			break;
		/* long options */
		case 0:
			print_usage(prgname);
			return -1;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (enabled_port_mask == 0) {
		printf("portmask not specified\n");
		print_usage(prgname);
		return -1;
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind - 1;
	optind = 0; /* reset getopt lib */
	return ret;
}
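/*
 * Example invocation, matching the usage string above (EAL arguments come
 * before "--", application arguments after; the binary name here is only
 * illustrative and depends on how the example is built):
 *
 *   ./ip_fragmentation -c 0x3 -n 4 -- -p 0x3 -q 1
 *
 * This runs on two lcores, enables ports 0 and 1, and assigns one RX queue
 * (= one port) per lcore.
 */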
static void
print_ethaddr(const char *name, struct ether_addr *eth_addr)
{
	printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
	       eth_addr->addr_bytes[0],
	       eth_addr->addr_bytes[1],
	       eth_addr->addr_bytes[2],
	       eth_addr->addr_bytes[3],
	       eth_addr->addr_bytes[4],
	       eth_addr->addr_bytes[5]);
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
int
MAIN(int argc, char **argv)
{
	struct lcore_queue_conf *qconf;
	int ret;
	unsigned nb_ports, i;
	uint16_t queueid = 0;
	unsigned lcore_id = 0, rx_lcore_id = 0;
	uint32_t n_tx_queue, nb_lcores;
	uint8_t portid;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "rte_eal_init failed");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid arguments");
	/* create the mbuf pools */
	pool_direct = rte_mempool_create("pool_direct", NB_MBUF,
			MBUF_SIZE, 32,
			sizeof(struct rte_pktmbuf_pool_private),
			rte_pktmbuf_pool_init, NULL,
			rte_pktmbuf_init, NULL,
			rte_socket_id(), 0);
	if (pool_direct == NULL)
		rte_panic("Cannot init direct mbuf pool\n");

	pool_indirect = rte_mempool_create("pool_indirect", NB_MBUF,
			sizeof(struct rte_mbuf), 32,
			0, NULL, NULL,
			rte_pktmbuf_init, NULL,
			rte_socket_id(), 0);
	if (pool_indirect == NULL)
		rte_panic("Cannot init indirect mbuf pool\n");
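	/*
	 * Pool sizing: elements of pool_direct are full MBUF_SIZE buffers
	 * (2048 bytes of room plus mbuf header and headroom) that carry real
	 * packet data, while pool_indirect elements are bare struct rte_mbuf
	 * headers with no data room at all, since an indirect mbuf only
	 * references payload owned by some other (direct) mbuf.
	 */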
	/* init driver; in this DPDK release the PMDs must be initialized and
	 * the PCI bus probed explicitly before ports can be enumerated */
	if (rte_pmd_init_all() < 0)
		rte_panic("Cannot init PMD\n");

	if (rte_eal_pci_probe() < 0)
		rte_panic("Cannot probe PCI\n");

	nb_ports = rte_eth_dev_count();
	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	nb_lcores = rte_lcore_count();
	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("Skipping disabled port %d\n", portid);
			continue;
		}

		qconf = &lcore_queue_conf[rx_lcore_id];

		/* get the lcore_id for this port */
		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
		       qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {
			rx_lcore_id++;
			if (rx_lcore_id >= RTE_MAX_LCORE)
				rte_exit(EXIT_FAILURE, "Not enough cores\n");
			qconf = &lcore_queue_conf[rx_lcore_id];
		}
		qconf->rx_queue_list[qconf->n_rx_queue] = portid;
		qconf->n_rx_queue++;

		/* init port */
		printf("Initializing port %d on lcore %u... ", portid,
		       rx_lcore_id);
		fflush(stdout);

		n_tx_queue = nb_lcores;
		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
		ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue,
					    &port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot configure device: "
				 "err=%d, port=%d\n", ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf(", ");
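		/*
		 * Queue topology for the setup below: each enabled port gets a
		 * single RX queue (queue 0), polled by the lcore chosen above,
		 * while one TX queue per (lcore, port) pair is created so that
		 * every lcore can transmit on every port without locking.
		 */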
		/* init one RX queue */
		queueid = 0;
		printf("rxq=%d ", queueid);
		fflush(stdout);
		ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
					     rte_eth_dev_socket_id(portid),
					     &rx_conf, pool_direct);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
				 "err=%d, port=%d\n", ret, portid);

		/* init one TX queue per couple (lcore,port) */
		queueid = 0;
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;
			printf("txq=%u,%d ", lcore_id, queueid);
			fflush(stdout);
			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
						     rte_eth_dev_socket_id(portid),
						     &tx_conf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
					 "err=%d, port=%d\n", ret, portid);

			qconf = &lcore_queue_conf[lcore_id];
			qconf->tx_queue_id[portid] = queueid;
			queueid++;
		}
		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
				 "err=%d, port=%d\n", ret, portid);

		printf("done: ");

		/* Set port in promiscuous mode */
		rte_eth_promiscuous_enable(portid);
	}

	check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
	/* create the LPM table */
	l3fwd_lpm = rte_lpm_create("L3FWD_LPM", rte_socket_id(),
				   L3FWD_LPM_MAX_RULES, 0);
	if (l3fwd_lpm == NULL)
		rte_panic("Unable to create the l3fwd LPM table\n");

	/* populate the LPM table */
	for (i = 0; i < L3FWD_NUM_ROUTES; i++) {
		ret = rte_lpm_add(l3fwd_lpm,
			l3fwd_route_array[i].ip,
			l3fwd_route_array[i].depth,
			l3fwd_route_array[i].if_out);
		if (ret < 0) {
			rte_panic("Unable to add entry %u to the l3fwd "
				"LPM table\n", i);
		}

		printf("Adding route 0x%08x / %d (%d)\n",
			(unsigned) l3fwd_route_array[i].ip,
			l3fwd_route_array[i].depth,
			l3fwd_route_array[i].if_out);
	}
	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}