/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <getopt.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/param.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_string_fns.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include <rte_ip_frag.h>
#define MAX_PKT_BURST 32

#define RTE_LOGTYPE_IP_RSMBL RTE_LOGTYPE_USER1

#define MAX_JUMBO_PKT_LEN 9600

/* mbuf data room and pool floor (typical defaults for this example) */
#define BUF_SIZE 2048
#define MBUF_SIZE \
    (BUF_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

#define NB_MBUF 8192

/* allow max jumbo frame 9.5 KB */
#define JUMBO_FRAME_MAX_SIZE 0x2600

#define MAX_FLOW_NUM UINT16_MAX
#define MIN_FLOW_NUM 1
#define DEF_FLOW_NUM 0x1000

/* TTL numbers are in ms. */
#define MAX_FLOW_TTL (3600 * MS_PER_S)
#define MIN_FLOW_TTL 1
#define DEF_FLOW_TTL MS_PER_S

#define MAX_FRAG_NUM RTE_LIBRTE_IP_FRAG_MAX_FRAG

/* Should be power of two. */
#define IP_FRAG_TBL_BUCKET_ENTRIES 16

static uint32_t max_flow_num = DEF_FLOW_NUM;
static uint32_t max_flow_ttl = DEF_FLOW_TTL;
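/*
 * max_flow_num / max_flow_ttl are set from the --maxflows / --flowttl
 * command-line options. They size the per-RX-queue fragment table and
 * determine how long (in ms, converted to TSC cycles in setup_queue_tbl())
 * an incomplete packet is kept before its fragments are dropped.
 */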
/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */

/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512

static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/* ethernet addresses of ports */
static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

#define IPv4_BYTES_FMT "%" PRIu8 "." "%" PRIu8 "." "%" PRIu8 "." "%" PRIu8
#define IPv4_BYTES(addr) \
    (uint8_t) (((addr) >> 24) & 0xFF),\
    (uint8_t) (((addr) >> 16) & 0xFF),\
    (uint8_t) (((addr) >> 8) & 0xFF),\
    (uint8_t) ((addr) & 0xFF)

#define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
    "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
#define IPv6_BYTES(addr) \
    addr[0],  addr[1],  addr[2],  addr[3], \
    addr[4],  addr[5],  addr[6],  addr[7], \
    addr[8],  addr[9],  addr[10], addr[11],\
    addr[12], addr[13], addr[14], addr[15]

#define IPV6_ADDR_LEN 16
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

static int rx_queue_per_lcore = 1;

/* circular buffer of mbufs queued for TX on one port */
struct mbuf_table {
    uint32_t len;
    uint32_t head;
    uint32_t tail;
    struct rte_mbuf *m_table[0];
};

struct rx_queue {
    struct rte_ip_frag_tbl *frag_tbl;
    struct rte_mempool *pool;
    struct rte_lpm *lpm;
    struct rte_lpm6 *lpm6;
    uint8_t portid;
};

struct tx_lcore_stat {
    uint64_t call;
    uint64_t drop;
    uint64_t queue;
    uint64_t send;
};

#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
#define MAX_RX_QUEUE_PER_PORT 128

struct lcore_queue_conf {
    uint16_t n_rx_queue;
    struct rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
    uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
    struct rte_ip_frag_death_row death_row;
    struct mbuf_table *tx_mbufs[RTE_MAX_ETHPORTS];
    struct tx_lcore_stat tx_stat;
} __rte_cache_aligned;
static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode        = ETH_MQ_RX_RSS,
        .max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
        .header_split   = 0, /**< Header Split disabled */
        .hw_ip_checksum = 1, /**< IP checksum offload enabled */
        .hw_vlan_filter = 0, /**< VLAN filtering disabled */
        .jumbo_frame    = 1, /**< Jumbo Frame Support enabled */
        .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
static const struct rte_eth_rxconf rx_conf = {
    .rx_thresh = {
        .pthresh = RX_PTHRESH,
        .hthresh = RX_HTHRESH,
        .wthresh = RX_WTHRESH,
    },
    .rx_free_thresh = 32,
};

static const struct rte_eth_txconf tx_conf = {
    .tx_thresh = {
        .pthresh = TX_PTHRESH,
        .hthresh = TX_HTHRESH,
        .wthresh = TX_WTHRESH,
    },
    .tx_free_thresh = 0, /* Use PMD default values */
    .tx_rs_thresh = 0,   /* Use PMD default values */
};
/*
 * IPv4 forwarding table
 */
struct l3fwd_ipv4_route {
    uint32_t ip;
    uint8_t depth;
    uint8_t if_out;
};

struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
    {IPv4(100,10,0,0), 16, 0},
    {IPv4(100,20,0,0), 16, 1},
    {IPv4(100,30,0,0), 16, 2},
    {IPv4(100,40,0,0), 16, 3},
    {IPv4(100,50,0,0), 16, 4},
    {IPv4(100,60,0,0), 16, 5},
    {IPv4(100,70,0,0), 16, 6},
    {IPv4(100,80,0,0), 16, 7},
};
/*
 * IPv6 forwarding table
 */
struct l3fwd_ipv6_route {
    uint8_t ip[IPV6_ADDR_LEN];
    uint8_t depth;
    uint8_t if_out;
};

static struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = {
    {{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
    {{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
    {{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
    {{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
    {{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
    {{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
    {{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
    {{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
};
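/*
 * Static example routes: the eight IPv4 /16 prefixes and the eight IPv6 /48
 * prefixes above map to output ports 0-7 via the LPM/LPM6 lookups in
 * reassemble(). Packets matching no route (or a disabled port) are sent
 * back out the port they arrived on.
 */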
#define LPM_MAX_RULES 1024
#define LPM6_MAX_RULES 1024
#define LPM6_NUMBER_TBL8S (1 << 16)

struct rte_lpm6_config lpm6_config = {
    .max_rules = LPM6_MAX_RULES,
    .number_tbl8s = LPM6_NUMBER_TBL8S,
    .flags = 0,
};

static struct rte_lpm *socket_lpm[RTE_MAX_NUMA_NODES];
static struct rte_lpm6 *socket_lpm6[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_IP_FRAG_TBL_STAT
#define TX_LCORE_STAT_UPDATE(s, f, v) ((s)->f += (v))
#else
#define TX_LCORE_STAT_UPDATE(s, f, v) do {} while (0)
#endif /* RTE_LIBRTE_IP_FRAG_TBL_STAT */
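/*
 * Per-lcore TX statistics (struct tx_lcore_stat) are only collected when the
 * IP fragmentation library is built with RTE_LIBRTE_IP_FRAG_TBL_STAT;
 * otherwise TX_LCORE_STAT_UPDATE() compiles to a no-op.
 */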
/*
 * If number of queued packets reached given threshold, then
 * send burst of packets on an output interface.
 */
static inline uint32_t
send_burst(struct lcore_queue_conf *qconf, uint32_t thresh, uint8_t port)
{
    uint32_t fill, len, k, n;
    struct mbuf_table *txmb;

    txmb = qconf->tx_mbufs[port];
    len = txmb->len;

    if ((int32_t)(fill = txmb->head - txmb->tail) < 0)
        fill += len;

    if (fill >= thresh) {
        n = RTE_MIN(len - txmb->tail, fill);

        k = rte_eth_tx_burst(port, qconf->tx_queue_id[port],
            txmb->m_table + txmb->tail, (uint16_t)n);

        TX_LCORE_STAT_UPDATE(&qconf->tx_stat, call, 1);
        TX_LCORE_STAT_UPDATE(&qconf->tx_stat, send, k);

        fill -= k;
        if ((txmb->tail += k) == len)
            txmb->tail = 0;
    }

    return fill;
}
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
send_single_packet(struct rte_mbuf *m, uint8_t port)
{
    uint32_t fill, lcore_id, len;
    struct lcore_queue_conf *qconf;
    struct mbuf_table *txmb;

    lcore_id = rte_lcore_id();
    qconf = &lcore_queue_conf[lcore_id];

    txmb = qconf->tx_mbufs[port];
    len = txmb->len;

    fill = send_burst(qconf, MAX_PKT_BURST, port);

    /* the queue is full: drop the oldest queued packet to make room */
    if (fill == len - 1) {
        TX_LCORE_STAT_UPDATE(&qconf->tx_stat, drop, 1);
        rte_pktmbuf_free(txmb->m_table[txmb->tail]);
        if (++txmb->tail == len)
            txmb->tail = 0;
    }

    TX_LCORE_STAT_UPDATE(&qconf->tx_stat, queue, 1);
    txmb->m_table[txmb->head] = m;
    if (++txmb->head == len)
        txmb->head = 0;

    return 0;
}
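/*
 * Per-packet work: a fragmented IPv4/IPv6 packet is fed into the RX queue's
 * fragment table and processing stops until the last fragment completes the
 * packet. Once a whole packet is available, the destination port is looked
 * up in the LPM/LPM6 table, the Ethernet header is rewritten and the packet
 * is queued for transmission.
 */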
static inline void
reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
    struct lcore_queue_conf *qconf, uint64_t tms)
{
    struct ether_hdr *eth_hdr;
    struct rte_ip_frag_tbl *tbl;
    struct rte_ip_frag_death_row *dr;
    struct rx_queue *rxq;
    void *d_addr_bytes;
    uint8_t next_hop, dst_port;

    rxq = &qconf->rx_queue_list[queue];

    eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    dst_port = portid;

    /* if packet is IPv4 */
    if (m->ol_flags & (PKT_RX_IPV4_HDR)) {
        struct ipv4_hdr *ip_hdr;
        uint32_t ip_dst;

        ip_hdr = (struct ipv4_hdr *)(eth_hdr + 1);

        /* if it is a fragmented packet, then try to reassemble. */
        if (rte_ipv4_frag_pkt_is_fragmented(ip_hdr)) {
            struct rte_mbuf *mo;

            tbl = rxq->frag_tbl;
            dr = &qconf->death_row;

            /* prepare mbuf: setup l2_len/l3_len. */
            m->l2_len = sizeof(*eth_hdr);
            m->l3_len = sizeof(*ip_hdr);

            /* process this fragment. */
            mo = rte_ipv4_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr);
            if (mo == NULL)
                /* no packet to send out. */
                return;

            /* we have our packet reassembled. */
            if (mo != m) {
                m = mo;
                eth_hdr = rte_pktmbuf_mtod(m,
                    struct ether_hdr *);
                ip_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
            }
        }
        ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);

        /* Find destination port */
        if (rte_lpm_lookup(rxq->lpm, ip_dst, &next_hop) == 0 &&
                (enabled_port_mask & 1 << next_hop) != 0)
            dst_port = next_hop;

        eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
    }
    /* if packet is IPv6 */
    else if (m->ol_flags & (PKT_RX_IPV6_HDR | PKT_RX_IPV6_HDR_EXT)) {
        struct ipv6_extension_fragment *frag_hdr;
        struct ipv6_hdr *ip_hdr;

        ip_hdr = (struct ipv6_hdr *)(eth_hdr + 1);

        frag_hdr = rte_ipv6_frag_get_ipv6_fragment_header(ip_hdr);

        if (frag_hdr != NULL) {
            struct rte_mbuf *mo;

            tbl = rxq->frag_tbl;
            dr = &qconf->death_row;

            /* prepare mbuf: setup l2_len/l3_len. */
            m->l2_len = sizeof(*eth_hdr);
            m->l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);

            mo = rte_ipv6_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr, frag_hdr);
            if (mo == NULL)
                return;

            if (mo != m) {
                m = mo;
                eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
                ip_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
            }
        }

        /* Find destination port */
        if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr, &next_hop) == 0 &&
                (enabled_port_mask & 1 << next_hop) != 0)
            dst_port = next_hop;

        eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
    }
    /* if packet wasn't IPv4 or IPv6, it's forwarded to the port it came from */

    /*
     * Destination MAC 02:00:00:00:00:xx, where xx is the output port.
     * The 64-bit little-endian write spills two bytes into s_addr, which are
     * immediately overwritten by ether_addr_copy() below.
     */
    d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
    *((uint64_t *)d_addr_bytes) = 0x000000000002 + ((uint64_t)dst_port << 40);

    /* src addr */
    ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);

    send_single_packet(m, dst_port);
}
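/*
 * drain_tsc below converts BURST_TX_DRAIN_US into TSC cycles; e.g. with a
 * 2 GHz TSC: (2e9 + 1e6 - 1) / 1e6 * 100 ~= 200,000 cycles, so queued
 * packets are flushed at least every ~100 us even on a lightly loaded port.
 */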
/* main processing loop */
static int
main_loop(__attribute__((unused)) void *dummy)
{
    struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
    unsigned lcore_id;
    uint64_t diff_tsc, cur_tsc, prev_tsc;
    int i, j, nb_rx;
    uint8_t portid;
    struct lcore_queue_conf *qconf;
    const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

    prev_tsc = 0;
    lcore_id = rte_lcore_id();
    qconf = &lcore_queue_conf[lcore_id];

    if (qconf->n_rx_queue == 0) {
        RTE_LOG(INFO, IP_RSMBL, "lcore %u has nothing to do\n", lcore_id);
        return 0;
    }

    RTE_LOG(INFO, IP_RSMBL, "entering main loop on lcore %u\n", lcore_id);

    for (i = 0; i < qconf->n_rx_queue; i++) {
        portid = qconf->rx_queue_list[i].portid;
        RTE_LOG(INFO, IP_RSMBL, " -- lcoreid=%u portid=%hhu\n", lcore_id,
            portid);
    }

    while (1) {

        cur_tsc = rte_rdtsc();

        /*
         * TX burst queue drain
         */
        diff_tsc = cur_tsc - prev_tsc;
        if (unlikely(diff_tsc > drain_tsc)) {
            /*
             * This could be optimized (use queueid instead of
             * portid), but it is not called so often
             */
            for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
                if ((enabled_port_mask & (1 << portid)) != 0)
                    send_burst(qconf, 1, portid);
            }

            prev_tsc = cur_tsc;
        }

        /*
         * Read packet from RX queues
         */
        for (i = 0; i < qconf->n_rx_queue; ++i) {

            portid = qconf->rx_queue_list[i].portid;

            nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
                MAX_PKT_BURST);

            /* Prefetch first packets */
            for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
                rte_prefetch0(rte_pktmbuf_mtod(
                    pkts_burst[j], void *));
            }

            /* Prefetch and forward already prefetched packets */
            for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
                rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
                    j + PREFETCH_OFFSET], void *));
                reassemble(pkts_burst[j], portid,
                    i, qconf, cur_tsc);
            }

            /* Forward remaining prefetched packets */
            for (; j < nb_rx; j++) {
                reassemble(pkts_burst[j], portid,
                    i, qconf, cur_tsc);
            }

            rte_ip_frag_free_death_row(&qconf->death_row,
                PREFETCH_OFFSET);
        }
    }
}
static void
print_usage(const char *prgname)
{
    printf("%s [EAL options] -- -p PORTMASK [-q NQ]"
        " [--max-pkt-len PKTLEN]"
        " [--maxflows=<flows>] [--flowttl=<ttl>[(s|ms)]]\n"
        " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
        " -q NQ: number of RX queues per lcore\n"
        " --maxflows=<flows>: optional, maximum number of flows "
        "supported\n"
        " --flowttl=<ttl>[(s|ms)]: optional, maximum TTL for each "
        "flow\n",
        prgname);
}
static int
parse_flow_num(const char *str, uint32_t min, uint32_t max, uint32_t *val)
{
    char *end;
    uint64_t v;

    /* parse decimal string */
    errno = 0;
    v = strtoul(str, &end, 10);
    if (errno != 0 || *end != '\0')
        return -EINVAL;
    if (v < min || v > max)
        return -EINVAL;

    *val = (uint32_t)v;
    return 0;
}

static int
parse_flow_ttl(const char *str, uint32_t min, uint32_t max, uint32_t *val)
{
    char *end;
    uint64_t v;
    static const char frmt_sec[] = "s";
    static const char frmt_msec[] = "ms";

    /* parse decimal string with an optional "s" or "ms" suffix */
    errno = 0;
    v = strtoul(str, &end, 10);
    if (errno != 0)
        return -EINVAL;
    if (*end != '\0') {
        if (strncmp(frmt_sec, end, sizeof(frmt_sec)) == 0)
            v *= MS_PER_S;
        else if (strncmp(frmt_msec, end, sizeof(frmt_msec)) != 0)
            return -EINVAL;
    }
    if (v < min || v > max)
        return -EINVAL;

    *val = (uint32_t)v;
    return 0;
}
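/*
 * Accepted TTL forms: "--flowttl=500" and "--flowttl=500ms" both mean
 * 500 ms, while "--flowttl=10s" means 10000 ms.
 */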
static int
parse_portmask(const char *portmask)
{
    char *end = NULL;
    unsigned long pm;

    /* parse hexadecimal string */
    pm = strtoul(portmask, &end, 16);
    if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
        return -1;
    if (pm == 0)
        return -1;

    return pm;
}

static int
parse_nqueue(const char *q_arg)
{
    char *end = NULL;
    unsigned long n;

    printf("%p\n", q_arg);

    /* parse decimal string */
    n = strtoul(q_arg, &end, 10);
    if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
        return -1;
    if (n == 0)
        return -1;
    if (n >= MAX_RX_QUEUE_PER_LCORE)
        return -1;

    return n;
}
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
    int opt, ret;
    char **argvopt;
    int option_index;
    char *prgname = argv[0];
    static struct option lgopts[] = {
        {"max-pkt-len", 1, 0, 0},
        {"maxflows", 1, 0, 0},
        {"flowttl", 1, 0, 0},
        {NULL, 0, 0, 0}
    };

    argvopt = argv;

    while ((opt = getopt_long(argc, argvopt, "p:q:",
            lgopts, &option_index)) != EOF) {

        switch (opt) {
        /* portmask */
        case 'p':
            enabled_port_mask = parse_portmask(optarg);
            if (enabled_port_mask == 0) {
                printf("invalid portmask\n");
                print_usage(prgname);
                return -1;
            }
            break;

        /* number of RX queues per lcore */
        case 'q':
            rx_queue_per_lcore = parse_nqueue(optarg);
            if (rx_queue_per_lcore < 0) {
                printf("invalid queue number\n");
                print_usage(prgname);
                return -1;
            }
            break;

        /* long options */
        case 0:
            if (!strncmp(lgopts[option_index].name,
                    "maxflows", 8)) {
                if ((ret = parse_flow_num(optarg, MIN_FLOW_NUM,
                        MAX_FLOW_NUM,
                        &max_flow_num)) != 0) {
                    printf("invalid value: \"%s\" for "
                        "parameter %s\n", optarg,
                        lgopts[option_index].name);
                    print_usage(prgname);
                    return ret;
                }
            }

            if (!strncmp(lgopts[option_index].name, "flowttl", 7)) {
                if ((ret = parse_flow_ttl(optarg, MIN_FLOW_TTL,
                        MAX_FLOW_TTL,
                        &max_flow_ttl)) != 0) {
                    printf("invalid value: \"%s\" for "
                        "parameter %s\n", optarg,
                        lgopts[option_index].name);
                    print_usage(prgname);
                    return ret;
                }
            }
            break;

        default:
            print_usage(prgname);
            return -1;
        }
    }

    if (optind >= 0)
        argv[optind-1] = prgname;

    ret = optind-1;
    optind = 0; /* reset getopt lib */
    return ret;
}
static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
    printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
        eth_addr->addr_bytes[0],
        eth_addr->addr_bytes[1],
        eth_addr->addr_bytes[2],
        eth_addr->addr_bytes[3],
        eth_addr->addr_bytes[4],
        eth_addr->addr_bytes[5]);
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90  /* 9s (90 * 100ms) in total */
    uint8_t portid, count, all_ports_up, print_flag = 0;
    struct rte_eth_link link;

    printf("\nChecking link status");
    fflush(stdout);
    for (count = 0; count <= MAX_CHECK_TIME; count++) {
        all_ports_up = 1;
        for (portid = 0; portid < port_num; portid++) {
            if ((port_mask & (1 << portid)) == 0)
                continue;
            memset(&link, 0, sizeof(link));
            rte_eth_link_get_nowait(portid, &link);
            /* print link status if flag set */
            if (print_flag == 1) {
                if (link.link_status)
                    printf("Port %d Link Up - speed %u "
                        "Mbps - %s\n", (uint8_t)portid,
                        (unsigned)link.link_speed,
                        (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
                else
                    printf("Port %d Link Down\n",
                        (uint8_t)portid);
                continue;
            }
            /* clear all_ports_up flag if any link down */
            if (link.link_status == 0) {
                all_ports_up = 0;
                break;
            }
        }
        /* after finally printing all link status, get out */
        if (print_flag == 1)
            break;

        if (all_ports_up == 0) {
            printf(".");
            fflush(stdout);
            rte_delay_ms(CHECK_INTERVAL);
        }

        /* set the print_flag if all ports up or timeout */
        if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
            print_flag = 1;
            printf("done\n");
        }
    }
}
static int
init_routing_table(void)
{
    struct rte_lpm *lpm;
    struct rte_lpm6 *lpm6;
    int socket, ret;
    unsigned i;

    for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
        if (socket_lpm[socket]) {
            lpm = socket_lpm[socket];
            /* populate the LPM table */
            for (i = 0; i < RTE_DIM(l3fwd_ipv4_route_array); i++) {
                ret = rte_lpm_add(lpm,
                    l3fwd_ipv4_route_array[i].ip,
                    l3fwd_ipv4_route_array[i].depth,
                    l3fwd_ipv4_route_array[i].if_out);
                if (ret < 0) {
                    RTE_LOG(ERR, IP_RSMBL, "Unable to add entry %i to the l3fwd "
                        "LPM table\n", i);
                    return -1;
                }

                RTE_LOG(INFO, IP_RSMBL, "Socket %i: adding route " IPv4_BYTES_FMT
                        "/%d (port %d)\n",
                    socket,
                    IPv4_BYTES(l3fwd_ipv4_route_array[i].ip),
                    l3fwd_ipv4_route_array[i].depth,
                    l3fwd_ipv4_route_array[i].if_out);
            }
        }

        if (socket_lpm6[socket]) {
            lpm6 = socket_lpm6[socket];
            /* populate the LPM6 table */
            for (i = 0; i < RTE_DIM(l3fwd_ipv6_route_array); i++) {
                ret = rte_lpm6_add(lpm6,
                    l3fwd_ipv6_route_array[i].ip,
                    l3fwd_ipv6_route_array[i].depth,
                    l3fwd_ipv6_route_array[i].if_out);
                if (ret < 0) {
                    RTE_LOG(ERR, IP_RSMBL, "Unable to add entry %i to the l3fwd "
                        "LPM6 table\n", i);
                    return -1;
                }

                RTE_LOG(INFO, IP_RSMBL, "Socket %i: adding route " IPv6_BYTES_FMT
                        "/%d (port %d)\n",
                    socket,
                    IPv6_BYTES(l3fwd_ipv6_route_array[i].ip),
                    l3fwd_ipv6_route_array[i].depth,
                    l3fwd_ipv6_route_array[i].if_out);
            }
        }
    }
    return 0;
}
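/*
 * Allocate the per-(lcore, port) TX staging buffer. It holds
 * max(max_flow_num, 2 * MAX_PKT_BURST) mbuf pointers, so reassembled packets
 * can still be queued even if many tracked flows complete in a short burst.
 */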
static int
setup_port_tbl(struct lcore_queue_conf *qconf, uint32_t lcore, int socket,
    uint32_t port)
{
    struct mbuf_table *mtb;
    uint32_t n;
    size_t sz;

    n = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST);
    sz = sizeof(*mtb) + sizeof(mtb->m_table[0]) * n;

    if ((mtb = rte_zmalloc_socket(__func__, sz, CACHE_LINE_SIZE,
            socket)) == NULL) {
        RTE_LOG(ERR, IP_RSMBL, "%s() for lcore: %u, port: %u "
            "failed to allocate %zu bytes\n",
            __func__, lcore, port, sz);
        return -1;
    }

    mtb->len = n;
    qconf->tx_mbufs[port] = mtb;

    return 0;
}
static int
setup_queue_tbl(struct rx_queue *rxq, uint32_t lcore, uint32_t queue)
{
    int socket;
    uint32_t nb_mbuf;
    uint64_t frag_cycles;
    char buf[RTE_MEMPOOL_NAMESIZE];

    socket = rte_lcore_to_socket_id(lcore);
    if (socket == SOCKET_ID_ANY)
        socket = 0;

    frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S *
        max_flow_ttl;

    if ((rxq->frag_tbl = rte_ip_frag_table_create(max_flow_num,
            IP_FRAG_TBL_BUCKET_ENTRIES, max_flow_num, frag_cycles,
            socket)) == NULL) {
        RTE_LOG(ERR, IP_RSMBL, "ip_frag_tbl_create(%u) on "
            "lcore: %u for queue: %u failed\n",
            max_flow_num, lcore, queue);
        return -1;
    }

    /*
     * At any given moment up to <max_flow_num * (MAX_FRAG_NUM)>
     * mbufs could be stored in the fragment table.
     * Plus, each TX queue can hold up to <max_flow_num> packets.
     */

    nb_mbuf = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) * MAX_FRAG_NUM;
    nb_mbuf *= (port_conf.rxmode.max_rx_pkt_len + BUF_SIZE - 1) / BUF_SIZE;
    nb_mbuf *= 2; /* ipv4 and ipv6 */
    nb_mbuf += RTE_TEST_RX_DESC_DEFAULT + RTE_TEST_TX_DESC_DEFAULT;

    nb_mbuf = RTE_MAX(nb_mbuf, (uint32_t)NB_MBUF);

    snprintf(buf, sizeof(buf), "mbuf_pool_%u_%u", lcore, queue);

    if ((rxq->pool = rte_mempool_create(buf, nb_mbuf, MBUF_SIZE, 0,
            sizeof(struct rte_pktmbuf_pool_private),
            rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
            socket, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) == NULL) {
        RTE_LOG(ERR, IP_RSMBL, "mempool_create(%s) failed", buf);
        return -1;
    }

    return 0;
}
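/*
 * Worked example of the nb_mbuf sizing in setup_queue_tbl(), assuming the
 * defaults above (max_flow_num = 0x1000 = 4096, BUF_SIZE = 2048,
 * max_rx_pkt_len = 0x2600 = 9728) and MAX_FRAG_NUM = 4 (the usual
 * RTE_LIBRTE_IP_FRAG_MAX_FRAG build default):
 *   4096 * 4 = 16384 mbufs for in-flight fragments,
 *   * ceil(9728 / 2048) = 5 buffers per jumbo frame -> 81920,
 *   * 2 (IPv4 + IPv6) -> 163840, + 128 RX + 512 TX descriptors = 164480,
 * well above the NB_MBUF floor of 8192.
 */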
static int
init_mem(void)
{
    char buf[PATH_MAX];
    struct rte_lpm *lpm;
    struct rte_lpm6 *lpm6;
    int socket;
    unsigned lcore_id;

    /* traverse through lcores and initialize structures on each socket */
    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {

        if (rte_lcore_is_enabled(lcore_id) == 0)
            continue;

        socket = rte_lcore_to_socket_id(lcore_id);
        if (socket == SOCKET_ID_ANY)
            socket = 0;

        if (socket_lpm[socket] == NULL) {
            RTE_LOG(INFO, IP_RSMBL, "Creating LPM table on socket %i\n", socket);
            snprintf(buf, sizeof(buf), "IP_RSMBL_LPM_%i", socket);

            lpm = rte_lpm_create(buf, socket, LPM_MAX_RULES, 0);
            if (lpm == NULL) {
                RTE_LOG(ERR, IP_RSMBL, "Cannot create LPM table\n");
                return -1;
            }
            socket_lpm[socket] = lpm;
        }

        if (socket_lpm6[socket] == NULL) {
            RTE_LOG(INFO, IP_RSMBL, "Creating LPM6 table on socket %i\n", socket);
            snprintf(buf, sizeof(buf), "IP_RSMBL_LPM6_%i", socket);

            lpm6 = rte_lpm6_create(buf, socket, &lpm6_config);
            if (lpm6 == NULL) {
                RTE_LOG(ERR, IP_RSMBL, "Cannot create LPM6 table\n");
                return -1;
            }
            socket_lpm6[socket] = lpm6;
        }
    }

    return 0;
}
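/*
 * One LPM (IPv4) and one LPM6 (IPv6) routing table is created per NUMA
 * socket and shared by all RX queues running on lcores of that socket; the
 * tables are filled later by init_routing_table().
 */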
static void
queue_dump_stat(void)
{
    uint32_t i, lcore;
    const struct lcore_queue_conf *qconf;

    for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
        if (rte_lcore_is_enabled(lcore) == 0)
            continue;

        qconf = &lcore_queue_conf[lcore];
        for (i = 0; i < qconf->n_rx_queue; i++) {

            fprintf(stdout, " -- lcoreid=%u portid=%hhu "
                "frag tbl stat:\n",
                lcore, qconf->rx_queue_list[i].portid);
            rte_ip_frag_table_statistics_dump(stdout,
                qconf->rx_queue_list[i].frag_tbl);
            fprintf(stdout, "TX bursts:\t%" PRIu64 "\n"
                "TX packets queued:\t%" PRIu64 "\n"
                "TX packets dropped:\t%" PRIu64 "\n"
                "TX packets sent:\t%" PRIu64 "\n",
                qconf->tx_stat.call,
                qconf->tx_stat.queue,
                qconf->tx_stat.drop,
                qconf->tx_stat.send);
        }
    }
}

static void
signal_handler(int signum)
{
    queue_dump_stat();
    if (signum != SIGUSR1)
        rte_exit(0, "received signal: %d, exiting\n", signum);
}
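/*
 * SIGUSR1 only dumps the fragment-table and TX statistics; SIGINT and
 * SIGTERM dump the statistics and then terminate the application.
 */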
int
main(int argc, char **argv)
{
    struct lcore_queue_conf *qconf;
    struct rx_queue *rxq;
    int ret, socket;
    unsigned nb_ports;
    uint16_t queueid;
    unsigned lcore_id = 0, rx_lcore_id = 0;
    uint32_t n_tx_queue, nb_lcores;
    uint8_t portid;

    /* init EAL */
    ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
    argc -= ret;
    argv += ret;

    /* parse application arguments (after the EAL ones) */
    ret = parse_args(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid IP reassembly parameters\n");

    if (rte_eal_pci_probe() < 0)
        rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");

    nb_ports = rte_eth_dev_count();
    if (nb_ports > RTE_MAX_ETHPORTS)
        nb_ports = RTE_MAX_ETHPORTS;
    else if (nb_ports == 0)
        rte_exit(EXIT_FAILURE, "No ports found!\n");

    nb_lcores = rte_lcore_count();

    /* initialize structures (mempools, lpm etc.) */
    if (init_mem() < 0)
        rte_panic("Cannot initialize memory structures!\n");

    /* check if portmask has non-existent ports */
    if (enabled_port_mask & ~(RTE_LEN2MASK(nb_ports, unsigned)))
        rte_exit(EXIT_FAILURE, "Non-existent ports in portmask!\n");

    /* initialize all ports */
    for (portid = 0; portid < nb_ports; portid++) {
        /* skip ports that are not enabled */
        if ((enabled_port_mask & (1 << portid)) == 0) {
            printf("\nSkipping disabled port %d\n", portid);
            continue;
        }

        qconf = &lcore_queue_conf[rx_lcore_id];

        /* get the lcore_id for this port */
        while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
                qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {

            rx_lcore_id++;
            if (rx_lcore_id >= RTE_MAX_LCORE)
                rte_exit(EXIT_FAILURE, "Not enough cores\n");

            qconf = &lcore_queue_conf[rx_lcore_id];
        }

        socket = rte_lcore_to_socket_id(portid);
        if (socket == SOCKET_ID_ANY)
            socket = 0;

        queueid = qconf->n_rx_queue;
        rxq = &qconf->rx_queue_list[queueid];
        rxq->portid = portid;
        rxq->lpm = socket_lpm[socket];
        rxq->lpm6 = socket_lpm6[socket];
        if (setup_queue_tbl(rxq, rx_lcore_id, queueid) < 0)
            rte_exit(EXIT_FAILURE, "Failed to set up queue table\n");
        qconf->n_rx_queue++;

        /* init port */
        printf("Initializing port %d ... ", portid);
        fflush(stdout);

        n_tx_queue = nb_lcores;
        if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
            n_tx_queue = MAX_TX_QUEUE_PER_PORT;
        ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue,
            &port_conf);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "Cannot configure device: "
                "err=%d, port=%d\n",
                ret, portid);

        /* init one RX queue */
        ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
            socket, &rx_conf, rxq->pool);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
                "err=%d, port=%d\n",
                ret, portid);

        rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
        print_ethaddr(" Address:", &ports_eth_addr[portid]);
        printf("\n");

        /* init one TX queue per couple (lcore,port) */
        queueid = 0;
        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
            if (rte_lcore_is_enabled(lcore_id) == 0)
                continue;

            socket = (int) rte_lcore_to_socket_id(lcore_id);

            printf("txq=%u,%d,%d ", lcore_id, queueid, socket);
            fflush(stdout);
            ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
                socket, &tx_conf);
            if (ret < 0)
                rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
                    "port=%d\n", ret, portid);

            qconf = &lcore_queue_conf[lcore_id];
            qconf->tx_queue_id[portid] = queueid;
            setup_port_tbl(qconf, lcore_id, socket, portid);
            queueid++;
        }
        printf("\n");
    }

    printf("\n");

    /* start ports */
    for (portid = 0; portid < nb_ports; portid++) {
        if ((enabled_port_mask & (1 << portid)) == 0)
            continue;

        /* Start device */
        ret = rte_eth_dev_start(portid);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
                ret, portid);

        rte_eth_promiscuous_enable(portid);
    }

    if (init_routing_table() < 0)
        rte_exit(EXIT_FAILURE, "Cannot init routing table\n");

    check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);

    signal(SIGUSR1, signal_handler);
    signal(SIGTERM, signal_handler);
    signal(SIGINT, signal_handler);

    /* launch per-lcore init on every lcore */
    rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
        if (rte_eal_wait_lcore(lcore_id) < 0)
            return -1;
    }

    return 0;
}