/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>

#include "main.h"
#define APP_LOOKUP_EXACT_MATCH          0
#define APP_LOOKUP_LPM                  1
#define DO_RFC_1812_CHECKS

//#define APP_LOOKUP_METHOD             APP_LOOKUP_EXACT_MATCH
#ifndef APP_LOOKUP_METHOD
#define APP_LOOKUP_METHOD             APP_LOOKUP_LPM
#endif

#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
#include <rte_hash.h>
#elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
#include <rte_lpm.h>
#else
#error "APP_LOOKUP_METHOD set to incorrect value"
#endif
#define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
                       "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
#define IPv6_BYTES(addr) \
	addr[0],  addr[1],  addr[2],  addr[3], \
	addr[4],  addr[5],  addr[6],  addr[7], \
	addr[8],  addr[9],  addr[10], addr[11],\
	addr[12], addr[13], addr[14], addr[15]
#define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1

#define MAX_JUMBO_PKT_LEN  9600

#define IPV6_ADDR_LEN 16

#define NB_SOCKETS 8

#define MEMPOOL_CACHE_SIZE 256
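/*
 * Each pool element below holds the rte_mbuf metadata and reserved headroom
 * in front of a 2048-byte data room, enough for a full non-jumbo frame.
 */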
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
/*
 * This expression is used to calculate the number of mbufs needed depending on
 * user input, taking into account memory for rx and tx hardware rings, cache
 * per lcore and mtable per port per lcore. RTE_MAX is used to ensure that
 * NB_MBUF never goes below a minimum value of 8192.
 */
#define NB_MBUF RTE_MAX	(	\
		(nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT +	\
		nb_ports*nb_lcores*MAX_PKT_BURST +			\
		nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT +		\
		nb_lcores*MEMPOOL_CACHE_SIZE),				\
		(unsigned)8192)
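/*
 * Worked example (illustrative figures only): with 2 ports, 1 RX queue per
 * port, 2 lcores and 2 TX queues per port the sum is
 * 2*1*128 + 2*2*32 + 2*2*512 + 2*256 = 2944 mbufs, so the 8192 floor wins.
 */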
/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */

/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET	3
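/*
 * The RX loop in main_loop() issues a prefetch for packet j + PREFETCH_OFFSET
 * while forwarding packet j, so header data is usually already in cache by
 * the time it is read.
 */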
/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/* ethernet addresses of ports */
static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;
static int promiscuous_on = 0; /**< Ports set in promiscuous mode off by default. */
static int numa_on = 1; /**< NUMA is enabled by default. */
struct mbuf_table {
	uint16_t len;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

struct lcore_rx_queue {
	uint8_t port_id;
	uint8_t queue_id;
} __rte_cache_aligned;
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
#define MAX_RX_QUEUE_PER_PORT 128

#define MAX_LCORE_PARAMS 1024
struct lcore_params {
	uint8_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
} __rte_cache_aligned;

static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
static struct lcore_params lcore_params_array_default[] = {
	{0, 0, 2},
	{0, 1, 2},
	{0, 2, 2},
	{1, 0, 2},
	{1, 1, 2},
	{1, 2, 2},
	{2, 0, 2},
	{3, 0, 3},
	{3, 1, 3},
};

static struct lcore_params * lcore_params = lcore_params_array_default;
static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
				sizeof(lcore_params_array_default[0]);
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode	= ETH_MQ_RX_RSS,
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6,
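			/*
			 * RSS hashes the IPv4/IPv6 source and destination
			 * addresses, so all packets of a flow land on the same
			 * RX queue and thus on a single lcore.
			 */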
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};
static const struct rte_eth_rxconf rx_conf = {
	.rx_thresh = {
		.pthresh = RX_PTHRESH,
		.hthresh = RX_HTHRESH,
		.wthresh = RX_WTHRESH,
	},
	.rx_free_thresh = 32,
};

static const struct rte_eth_txconf tx_conf = {
	.tx_thresh = {
		.pthresh = TX_PTHRESH,
		.hthresh = TX_HTHRESH,
		.wthresh = TX_WTHRESH,
	},
	.tx_free_thresh = 0, /* Use PMD default values */
	.tx_rs_thresh = 0, /* Use PMD default values */
};

static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC       rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC       rte_jhash
#endif

struct ipv4_5tuple {
	uint32_t ip_dst;
	uint32_t ip_src;
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t  proto;
} __attribute__((__packed__));

struct ipv6_5tuple {
	uint8_t  ip_dst[IPV6_ADDR_LEN];
	uint8_t  ip_src[IPV6_ADDR_LEN];
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t  proto;
} __attribute__((__packed__));
struct ipv4_l3fwd_route {
	struct ipv4_5tuple key;
	uint8_t if_out;
};

struct ipv6_l3fwd_route {
	struct ipv6_5tuple key;
	uint8_t if_out;
};
static struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
	{{IPv4(100,10,0,1), IPv4(200,10,0,1), 101, 11, IPPROTO_TCP}, 0},
	{{IPv4(100,20,0,2), IPv4(200,20,0,2), 102, 12, IPPROTO_TCP}, 1},
	{{IPv4(100,30,0,3), IPv4(200,30,0,3), 103, 13, IPPROTO_TCP}, 2},
	{{IPv4(100,40,0,4), IPv4(200,40,0,4), 104, 14, IPPROTO_TCP}, 3},
};
static struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
	{
		{
			{0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
			{0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			 0x02, 0x1e, 0x67, 0xff, 0xfe, 0x0d, 0xb6, 0x0a},
			101, 11, IPPROTO_TCP /* port/proto values reconstructed as examples */
		}, 0
	},
};
typedef struct rte_hash lookup_struct_t;
static lookup_struct_t *ipv4_l3fwd_lookup_struct[NB_SOCKETS];
static lookup_struct_t *ipv6_l3fwd_lookup_struct[NB_SOCKETS];

#define L3FWD_HASH_ENTRIES	1024
struct rte_hash_parameters ipv4_l3fwd_hash_params = {
	.name = "ipv4_l3fwd_hash_0",
	.entries = L3FWD_HASH_ENTRIES,
	.bucket_entries = 4,
	.key_len = sizeof(struct ipv4_5tuple),
	.hash_func = DEFAULT_HASH_FUNC,
	.hash_func_init_val = 0,
};

struct rte_hash_parameters ipv6_l3fwd_hash_params = {
	.name = "ipv6_l3fwd_hash_0",
	.entries = L3FWD_HASH_ENTRIES,
	.bucket_entries = 4,
	.key_len = sizeof(struct ipv6_5tuple),
	.hash_func = DEFAULT_HASH_FUNC,
	.hash_func_init_val = 0,
};
#define IPV4_L3FWD_NUM_ROUTES \
	(sizeof(ipv4_l3fwd_route_array) / sizeof(ipv4_l3fwd_route_array[0]))

#define IPV6_L3FWD_NUM_ROUTES \
	(sizeof(ipv6_l3fwd_route_array) / sizeof(ipv6_l3fwd_route_array[0]))

static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
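/*
 * rte_hash_add_key() returns the slot index assigned to a key; setup_hash()
 * stores each route's output port at that index, so a later rte_hash_lookup()
 * hit maps directly to an entry in these out_if tables.
 */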
#endif

#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
struct ipv4_l3fwd_route {
	uint32_t ip;
	uint8_t  depth;
	uint8_t  if_out;
};

static struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
	{IPv4(1,1,1,0), 24, 0},
	{IPv4(2,1,1,0), 24, 1},
	{IPv4(3,1,1,0), 24, 2},
	{IPv4(4,1,1,0), 24, 3},
	{IPv4(5,1,1,0), 24, 4},
	{IPv4(6,1,1,0), 24, 5},
	{IPv4(7,1,1,0), 24, 6},
	{IPv4(8,1,1,0), 24, 7},
};

#define IPV4_L3FWD_NUM_ROUTES \
	(sizeof(ipv4_l3fwd_route_array) / sizeof(ipv4_l3fwd_route_array[0]))
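/*
 * Longest-prefix match example: a packet addressed to 3.1.1.37 falls inside
 * 3.1.1.0/24 above, so it is forwarded out port 2; an address matching no
 * route keeps its input port (see get_ipv4_dst_port()).
 */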
#define IPV4_L3FWD_LPM_MAX_RULES    1024

typedef struct rte_lpm lookup_struct_t;
static lookup_struct_t *ipv4_l3fwd_lookup_struct[NB_SOCKETS];
#endif
struct lcore_conf {
	uint16_t n_rx_queue;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
	lookup_struct_t * ipv4_lookup_struct;
	lookup_struct_t * ipv6_lookup_struct;
} __rte_cache_aligned;

static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
/* Send burst of packets on an output interface */
static inline int
send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
{
	struct rte_mbuf **m_table;
	int ret;
	uint16_t queueid;

	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

	ret = rte_eth_tx_burst(port, queueid, m_table, n);
	if (unlikely(ret < n)) {
		do {
			rte_pktmbuf_free(m_table[ret]);
		} while (++ret < n);
	}

	return 0;
}
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
send_single_packet(struct rte_mbuf *m, uint8_t port)
{
	uint32_t lcore_id;
	uint16_t len;
	struct lcore_conf *qconf;

	lcore_id = rte_lcore_id();

	qconf = &lcore_conf[lcore_id];
	len = qconf->tx_mbufs[port].len;
	qconf->tx_mbufs[port].m_table[len] = m;
	len++;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		send_burst(qconf, MAX_PKT_BURST, port);
		len = 0;
	}

	qconf->tx_mbufs[port].len = len;
	return 0;
}
#ifdef DO_RFC_1812_CHECKS
static inline int
is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
{
	/* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
	/*
	 * 1. The packet length reported by the Link Layer must be large
	 * enough to hold the minimum length legal IP datagram (20 bytes).
	 */
	if (link_len < sizeof(struct ipv4_hdr))
		return -1;

	/* 2. The IP checksum must be correct. */
	/* this is checked in H/W */

	/*
	 * 3. The IP version number must be 4. If the version number is not 4
	 * then the packet may be another version of IP, such as IPng or
	 * ST-II.
	 */
	if (((pkt->version_ihl) >> 4) != 4)
		return -3;

	/*
	 * 4. The IP header length field must be large enough to hold the
	 * minimum length legal IP datagram (20 bytes = 5 words).
	 */
	if ((pkt->version_ihl & 0xf) < 5)
		return -4;

	/*
	 * 5. The IP total length field must be large enough to hold the IP
	 * datagram header, whose length is specified in the IP header length
	 * field.
	 */
	if (rte_be_to_cpu_16(pkt->total_length) < sizeof(struct ipv4_hdr))
		return -5;

	return 0;
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
static void
print_ipv4_key(struct ipv4_5tuple key)
{
	printf("IP dst = %08x, IP src = %08x, port dst = %d, port src = %d, proto = %d\n",
		(unsigned)key.ip_dst, (unsigned)key.ip_src, key.port_dst, key.port_src, key.proto);
}

static void
print_ipv6_key(struct ipv6_5tuple key)
{
	printf("IP dst = " IPv6_BYTES_FMT ", IP src = " IPv6_BYTES_FMT ", "
		"port dst = %d, port src = %d, proto = %d\n",
		IPv6_BYTES(key.ip_dst), IPv6_BYTES(key.ip_src),
		key.port_dst, key.port_src, key.proto);
}
static inline uint8_t
get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
	struct ipv4_5tuple key;
	struct tcp_hdr *tcp;
	struct udp_hdr *udp;
	int ret = 0;

	key.ip_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
	key.ip_src = rte_be_to_cpu_32(ipv4_hdr->src_addr);
	key.proto = ipv4_hdr->next_proto_id;

	switch (ipv4_hdr->next_proto_id) {
	case IPPROTO_TCP:
		tcp = (struct tcp_hdr *)((unsigned char *) ipv4_hdr +
					sizeof(struct ipv4_hdr));
		key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
		key.port_src = rte_be_to_cpu_16(tcp->src_port);
		break;

	case IPPROTO_UDP:
		udp = (struct udp_hdr *)((unsigned char *) ipv4_hdr +
					sizeof(struct ipv4_hdr));
		key.port_dst = rte_be_to_cpu_16(udp->dst_port);
		key.port_src = rte_be_to_cpu_16(udp->src_port);
		break;

	default:
		key.port_dst = 0;
		key.port_src = 0;
		break;
	}
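	/*
	 * rte_hash_lookup() returns the key's slot index on a hit and a
	 * negative value on a miss; unknown flows are simply bounced back out
	 * the port they arrived on.
	 */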
	/* Find destination port */
	ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0)? portid : ipv4_l3fwd_out_if[ret]);
}
static inline uint8_t
get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint8_t portid, lookup_struct_t *ipv6_l3fwd_lookup_struct)
{
	struct ipv6_5tuple key;
	struct tcp_hdr *tcp;
	struct udp_hdr *udp;
	int ret = 0;

	memcpy(key.ip_dst, ipv6_hdr->dst_addr, IPV6_ADDR_LEN);
	memcpy(key.ip_src, ipv6_hdr->src_addr, IPV6_ADDR_LEN);

	key.proto = ipv6_hdr->proto;

	switch (ipv6_hdr->proto) {
	case IPPROTO_TCP:
		tcp = (struct tcp_hdr *)((unsigned char *) ipv6_hdr +
					sizeof(struct ipv6_hdr));
		key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
		key.port_src = rte_be_to_cpu_16(tcp->src_port);
		break;

	case IPPROTO_UDP:
		udp = (struct udp_hdr *)((unsigned char *) ipv6_hdr +
					sizeof(struct ipv6_hdr));
		key.port_dst = rte_be_to_cpu_16(udp->dst_port);
		key.port_src = rte_be_to_cpu_16(udp->src_port);
		break;

	default:
		key.port_dst = 0;
		key.port_src = 0;
		break;
	}

	/* Find destination port */
	ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0)? portid : ipv6_l3fwd_out_if[ret]);
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
static inline uint8_t
get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
	uint8_t next_hop;

	return (uint8_t)((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
			rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0) ?
			next_hop : portid);
}
#endif
static inline void
l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qconf)
{
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ipv4_hdr;
	void *d_addr_bytes;
	uint8_t dst_port;

	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	if (m->ol_flags & PKT_RX_IPV4_HDR) {
		/* Handle IPv4 headers.*/
		ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, unsigned char *) +
					sizeof(struct ether_hdr));

#ifdef DO_RFC_1812_CHECKS
		/* Check to make sure the packet is valid (RFC1812) */
		if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
			rte_pktmbuf_free(m);
			return;
		}
#endif

		dst_port = get_ipv4_dst_port(ipv4_hdr, portid, qconf->ipv4_lookup_struct);
		if (dst_port >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port) == 0)
			dst_port = portid;

		/* 02:00:00:00:00:xx */
		d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
		*((uint64_t *)d_addr_bytes) = 0x000000000002 + ((uint64_t)dst_port << 40);
#ifdef DO_RFC_1812_CHECKS
		/* Update time to live and header checksum */
		--(ipv4_hdr->time_to_live);
		++(ipv4_hdr->hdr_checksum);
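		/*
		 * Incremental checksum update: decrementing the TTL lowers the
		 * big-endian TTL/protocol word by 0x0100, and on a
		 * little-endian host adding 1 to the stored checksum
		 * compensates (a simplification that ignores the
		 * one's-complement carry handled by RFC 1141).
		 */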
#endif

		/* src addr */
		ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);

		send_single_packet(m, dst_port);
	}
	else {
		/* Handle IPv6 headers.*/
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
		struct ipv6_hdr *ipv6_hdr;

		ipv6_hdr = (struct ipv6_hdr *)(rte_pktmbuf_mtod(m, unsigned char *) +
					sizeof(struct ether_hdr));

		dst_port = get_ipv6_dst_port(ipv6_hdr, portid, qconf->ipv6_lookup_struct);

		if (dst_port >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port) == 0)
			dst_port = portid;

		/* 02:00:00:00:00:xx */
		d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
		*((uint64_t *)d_addr_bytes) = 0x000000000002 + ((uint64_t)dst_port << 40);

		/* src addr */
		ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);

		send_single_packet(m, dst_port);
#else
		/* We don't currently handle IPv6 packets in LPM mode. */
		rte_pktmbuf_free(m);
#endif
	}
}
/* main processing loop */
static __attribute__((noreturn)) int
main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int i, j, nb_rx;
	uint8_t portid, queueid;
	struct lcore_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
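	/*
	 * drain_tsc converts BURST_TX_DRAIN_US into TSC cycles: cycles per
	 * microsecond (rounded up) times the drain interval; e.g. a 2 GHz TSC
	 * gives 2000 * 100 = 200000 cycles between forced TX flushes.
	 */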
	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
		while(1);
	}

	RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {
		portid = qconf->rx_queue_list[i].port_id;
		queueid = qconf->rx_queue_list[i].queue_id;
		RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id,
			portid, queueid);
	}

	while (1) {
		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			/*
			 * This could be optimized (use queueid instead of
			 * portid), but it is not called so often
			 */
			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				send_burst(&lcore_conf[lcore_id],
					qconf->tx_mbufs[portid].len,
					portid);
				qconf->tx_mbufs[portid].len = 0;
			}

			prev_tsc = cur_tsc;
		}
		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; ++i) {

			portid = qconf->rx_queue_list[i].port_id;
			queueid = qconf->rx_queue_list[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst, MAX_PKT_BURST);

			/* Prefetch first packets */
			for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
				rte_prefetch0(rte_pktmbuf_mtod(
						pkts_burst[j], void *));
			}

			/* Prefetch and forward already prefetched packets */
			for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
				rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
						j + PREFETCH_OFFSET], void *));
				l3fwd_simple_forward(pkts_burst[j], portid, qconf);
			}

			/* Forward remaining prefetched packets */
			for (; j < nb_rx; j++) {
				l3fwd_simple_forward(pkts_burst[j], portid, qconf);
			}
		}
	}
}
static int
check_lcore_params(void)
{
	uint8_t queue, lcore;
	uint16_t i;
	int socketid;

	for (i = 0; i < nb_lcore_params; ++i) {
		queue = lcore_params[i].queue_id;
		if (queue >= MAX_RX_QUEUE_PER_PORT) {
			printf("invalid queue number: %hhu\n", queue);
			return -1;
		}
		lcore = lcore_params[i].lcore_id;
		if (!rte_lcore_is_enabled(lcore)) {
			printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
			return -1;
		}
		if (((socketid = rte_lcore_to_socket_id(lcore)) != 0) &&
			(numa_on == 0)) {
			printf("warning: lcore %hhu is on socket %d with numa off\n",
				lcore, socketid);
		}
	}
	return 0;
}
static int
check_port_config(const unsigned nb_ports)
{
	unsigned portid;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("port %u is not enabled in port mask\n", portid);
			return -1;
		}
		if (portid >= nb_ports) {
			printf("port %u is not present on the board\n", portid);
			return -1;
		}
	}
	return 0;
}
static uint8_t
get_port_n_rx_queues(const uint8_t port)
{
	int queue = -1;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
			queue = lcore_params[i].queue_id;
	}
	return (uint8_t)(++queue);
}
static int
init_lcore_rx_queues(void)
{
	uint16_t i, nb_rx_queue;
	uint8_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
				(unsigned)nb_rx_queue + 1, (unsigned)lcore);
			return -1;
		} else {
			lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
				lcore_params[i].port_id;
			lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
				lcore_params[i].queue_id;
			lcore_conf[lcore].n_rx_queue++;
		}
	}
	return 0;
}
/* display usage */
static void
print_usage(const char *prgname)
{
	printf ("%s [EAL options] -- -p PORTMASK -P"
		"  [--config (port,queue,lcore)[,(port,queue,lcore)]]"
		"  [--enable-jumbo [--max-pkt-len PKTLEN]]\n"
		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		"  -P : enable promiscuous mode\n"
		"  --config (port,queue,lcore): rx queues configuration\n"
		"  --no-numa: optional, disable numa awareness\n"
		"  --enable-jumbo: enable jumbo frames;"
		" the maximum packet length is PKTLEN in decimal (64-9600)\n",
		prgname);
}
static int parse_max_pkt_len(const char *pktlen)
{
	char *end = NULL;
	unsigned long len;

	/* parse decimal string */
	len = strtoul(pktlen, &end, 10);
	if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (len == 0)
		return -1;

	return len;
}
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}
static int
parse_config(const char *q_arg)
{
	char s[256];
	const char *p, *p0 = q_arg;
	char *end;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_QUEUE,
		FLD_LCORE,
		_NUM_FLD
	};
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	int i;
	unsigned size;

	nb_lcore_params = 0;

	while ((p = strchr(p0,'(')) != NULL) {
		++p;
		if((p0 = strchr(p,')')) == NULL)
			return -1;

		size = p0 - p;
		if(size >= sizeof(s))
			return -1;

		rte_snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++){
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
				return -1;
		}
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
				nb_lcore_params);
			return -1;
		}
		lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;
	return 0;
}
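/*
 * For example, --config="(0,0,1),(0,1,2)" maps port 0 / RX queue 0 to lcore 1
 * and port 0 / RX queue 1 to lcore 2.
 */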
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{"config", 1, 0, 0},
		{"no-numa", 0, 0, 0},
		{"enable-jumbo", 0, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:P",
				lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case 'P':
			printf("Promiscuous mode selected\n");
			promiscuous_on = 1;
			break;

		/* long options */
		case 0:
			if (!strncmp(lgopts[option_index].name, "config", 6)) {
				ret = parse_config(optarg);
				if (ret) {
					printf("invalid config\n");
					print_usage(prgname);
					return -1;
				}
			}

			if (!strncmp(lgopts[option_index].name, "no-numa", 7)) {
				printf("numa is disabled\n");
				numa_on = 0;
			}

			if (!strncmp(lgopts[option_index].name, "enable-jumbo", 12)) {
				struct option lenopts = {"max-pkt-len", required_argument, 0, 0};

				printf("jumbo frame is enabled\n");
				port_conf.rxmode.jumbo_frame = 1;

				/* if no max-pkt-len set, use the default value ETHER_MAX_LEN */
				if (0 == getopt_long(argc, argvopt, "", &lenopts, &option_index)) {
					ret = parse_max_pkt_len(optarg);
					if ((ret < 64) || (ret > MAX_JUMBO_PKT_LEN)){
						printf("invalid packet length\n");
						print_usage(prgname);
						return -1;
					}
					port_conf.rxmode.max_rx_pkt_len = ret;
				}
				printf("set jumbo frame max packet length to %u\n",
					(unsigned int)port_conf.rxmode.max_rx_pkt_len);
			}
			break;

		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 0; /* reset getopt lib */
	return ret;
}
static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
	printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
		eth_addr->addr_bytes[0],
		eth_addr->addr_bytes[1],
		eth_addr->addr_bytes[2],
		eth_addr->addr_bytes[3],
		eth_addr->addr_bytes[4],
		eth_addr->addr_bytes[5]);
}
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
static void
setup_hash(int socketid)
{
	unsigned i;
	int ret;
	char s[64];

	/* create ipv4 hash */
	rte_snprintf(s, sizeof(s), "ipv4_l3fwd_hash_%d", socketid);
	ipv4_l3fwd_hash_params.name = s;
	ipv4_l3fwd_hash_params.socket_id = socketid;
	ipv4_l3fwd_lookup_struct[socketid] = rte_hash_create(&ipv4_l3fwd_hash_params);
	if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on "
			"socket %d\n", socketid);

	/* create ipv6 hash */
	rte_snprintf(s, sizeof(s), "ipv6_l3fwd_hash_%d", socketid);
	ipv6_l3fwd_hash_params.name = s;
	ipv6_l3fwd_hash_params.socket_id = socketid;
	ipv6_l3fwd_lookup_struct[socketid] = rte_hash_create(&ipv6_l3fwd_hash_params);
	if (ipv6_l3fwd_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on "
			"socket %d\n", socketid);

	/* populate the ipv4 hash */
	for (i = 0; i < IPV4_L3FWD_NUM_ROUTES; i++) {
		ret = rte_hash_add_key(ipv4_l3fwd_lookup_struct[socketid],
				(void *) &ipv4_l3fwd_route_array[i].key);
		if (ret < 0) {
			rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
				"l3fwd hash on socket %d\n", i, socketid);
		}
		ipv4_l3fwd_out_if[ret] = ipv4_l3fwd_route_array[i].if_out;
		printf("Hash: Adding key\n");
		print_ipv4_key(ipv4_l3fwd_route_array[i].key);
	}

	/* populate the ipv6 hash */
	for (i = 0; i < IPV6_L3FWD_NUM_ROUTES; i++) {
		ret = rte_hash_add_key(ipv6_l3fwd_lookup_struct[socketid],
				(void *) &ipv6_l3fwd_route_array[i].key);
		if (ret < 0) {
			rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
				"l3fwd hash on socket %d\n", i, socketid);
		}
		ipv6_l3fwd_out_if[ret] = ipv6_l3fwd_route_array[i].if_out;
		printf("Hash: Adding key\n");
		print_ipv6_key(ipv6_l3fwd_route_array[i].key);
	}
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
static void
setup_lpm(int socketid)
{
	unsigned i;
	int ret;
	char s[64];

	/* create the LPM table */
	rte_snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
	ipv4_l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid,
				IPV4_L3FWD_LPM_MAX_RULES, 0);
	if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
			" on socket %d\n", socketid);

	/* populate the LPM table */
	for (i = 0; i < IPV4_L3FWD_NUM_ROUTES; i++) {
		ret = rte_lpm_add(ipv4_l3fwd_lookup_struct[socketid],
			ipv4_l3fwd_route_array[i].ip,
			ipv4_l3fwd_route_array[i].depth,
			ipv4_l3fwd_route_array[i].if_out);

		if (ret < 0) {
			rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
				"l3fwd LPM table on socket %d\n",
				i, socketid);
		}

		printf("LPM: Adding route 0x%08x / %d (%d)\n",
			(unsigned)ipv4_l3fwd_route_array[i].ip,
			ipv4_l3fwd_route_array[i].depth,
			ipv4_l3fwd_route_array[i].if_out);
	}
}
#endif
static int
init_mem(unsigned nb_mbuf)
{
	struct lcore_conf *qconf;
	int socketid;
	unsigned lcore_id;
	char s[64];

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socketid = rte_lcore_to_socket_id(lcore_id);
		else
			socketid = 0;

		if (socketid >= NB_SOCKETS) {
			rte_exit(EXIT_FAILURE, "Socket %d of lcore %u is out of range %d\n",
				socketid, lcore_id, NB_SOCKETS);
		}
		if (pktmbuf_pool[socketid] == NULL) {
			rte_snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
			pktmbuf_pool[socketid] =
				rte_mempool_create(s, nb_mbuf, MBUF_SIZE, MEMPOOL_CACHE_SIZE,
					sizeof(struct rte_pktmbuf_pool_private),
					rte_pktmbuf_pool_init, NULL,
					rte_pktmbuf_init, NULL,
					socketid, 0);
			if (pktmbuf_pool[socketid] == NULL)
				rte_exit(EXIT_FAILURE,
					"Cannot init mbuf pool on socket %d\n", socketid);
			else
				printf("Allocated mbuf pool on socket %d\n", socketid);

#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
			setup_lpm(socketid);
#else
			setup_hash(socketid);
#endif
		}
		qconf = &lcore_conf[lcore_id];
		qconf->ipv4_lookup_struct = ipv4_l3fwd_lookup_struct[socketid];
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
		qconf->ipv6_lookup_struct = ipv6_l3fwd_lookup_struct[socketid];
#endif
	}
	return 0;
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
						(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
						("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
int
MAIN(int argc, char **argv)
{
	struct lcore_conf *qconf;
	int ret;
	unsigned nb_ports;
	uint16_t queueid;
	unsigned lcore_id;
	uint32_t n_tx_queue, nb_lcores;
	uint8_t portid, nb_rx_queue, queue, socketid;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
	if (check_lcore_params() < 0)
		rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");

	ret = init_lcore_rx_queues();
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");

	/* init driver(s) */
	if (rte_pmd_init_all() < 0)
		rte_exit(EXIT_FAILURE, "Cannot init pmd\n");

	if (rte_eal_pci_probe() < 0)
		rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");

	nb_ports = rte_eth_dev_count();
	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	if (check_port_config(nb_ports) < 0)
		rte_exit(EXIT_FAILURE, "check_port_config failed\n");

	nb_lcores = rte_lcore_count();
	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}

		/* init port */
		printf("Initializing port %d ... ", portid);
		fflush(stdout);

		nb_rx_queue = get_port_n_rx_queues(portid);
		n_tx_queue = nb_lcores;
		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
			nb_rx_queue, (unsigned)n_tx_queue);
		ret = rte_eth_dev_configure(portid, nb_rx_queue,
					(uint16_t)n_tx_queue, &port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
				ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf(", ");

		/* init memory */
		ret = init_mem(NB_MBUF);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "init_mem failed\n");
		/* init one TX queue per couple (lcore,port) */
		queueid = 0;
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;

			if (numa_on)
				socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
			else
				socketid = 0;

			printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
			fflush(stdout);
			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
						socketid, &tx_conf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);

			qconf = &lcore_conf[lcore_id];
			qconf->tx_queue_id[portid] = queueid;
			queueid++;
		}
		printf("\n");
	}
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;
		qconf = &lcore_conf[lcore_id];
		printf("\nInitializing rx queues on lcore %u ... ", lcore_id);
		fflush(stdout);
		/* init RX queues */
		for(queue = 0; queue < qconf->n_rx_queue; ++queue) {
			portid = qconf->rx_queue_list[queue].port_id;
			queueid = qconf->rx_queue_list[queue].queue_id;

			if (numa_on)
				socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
			else
				socketid = 0;

			printf("rxq=%d,%d,%d ", portid, queueid, socketid);
			fflush(stdout);

			ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
					socketid, &rx_conf, pktmbuf_pool[socketid]);
			if (ret < 0)
				rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);
		}
	}

	printf("\n");
	/* start ports */
	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
				ret, portid);

		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
		 * target machine.
		 */
		if (promiscuous_on)
			rte_eth_promiscuous_enable(portid);
	}

	check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}