/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <getopt.h>

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/param.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include <rte_string_fns.h>
#include <rte_ip_frag.h>
#define MAX_PKT_BURST 32

#define RTE_LOGTYPE_IP_RSMBL RTE_LOGTYPE_USER1

#define MAX_JUMBO_PKT_LEN  9600

#define BUF_SIZE	RTE_MBUF_DEFAULT_DATAROOM
#define MBUF_DATA_SIZE	RTE_MBUF_DEFAULT_BUF_SIZE

#define NB_MBUF 8192
#define MEMPOOL_CACHE_SIZE 256
/* allow max jumbo frame 9.5 KB */
#define JUMBO_FRAME_MAX_SIZE	0x2600
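/* 0x2600 == 9728 bytes (9.5 KB): covers a 9600-byte jumbo packet plus
 * Ethernet framing. */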
#define MAX_FLOW_NUM	UINT16_MAX
#define MIN_FLOW_NUM	1
#define DEF_FLOW_NUM	0x1000

/* TTL numbers are in ms. */
#define MAX_FLOW_TTL	(3600 * MS_PER_S)
#define MIN_FLOW_TTL	1
#define DEF_FLOW_TTL	MS_PER_S

#define MAX_FRAG_NUM RTE_LIBRTE_IP_FRAG_MAX_FRAG

/* Should be power of two. */
#define IP_FRAG_TBL_BUCKET_ENTRIES	16
static uint32_t max_flow_num = DEF_FLOW_NUM;
static uint32_t max_flow_ttl = DEF_FLOW_TTL;

#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET	3
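/*
 * See main_loop(): the first PREFETCH_OFFSET packets of a burst are
 * prefetched up front; after that, packet j + PREFETCH_OFFSET is
 * prefetched while packet j is being reassembled, hiding memory latency.
 */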
/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512

static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/* Ethernet addresses of ports */
static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
#define IPv4_BYTES_FMT "%" PRIu8 ".%" PRIu8 ".%" PRIu8 ".%" PRIu8
#define IPv4_BYTES(addr) \
	(uint8_t) (((addr) >> 24) & 0xFF),\
	(uint8_t) (((addr) >> 16) & 0xFF),\
	(uint8_t) (((addr) >> 8) & 0xFF),\
	(uint8_t) ((addr) & 0xFF)
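/*
 * Example usage (hypothetical host-order address 0x640a0000, i.e.
 * 100.10.0.0):
 *
 *	printf("route: " IPv4_BYTES_FMT "\n", IPv4_BYTES(0x640a0000));
 *
 * prints "route: 100.10.0.0". init_routing_table() below uses the same
 * pair of macros to log each route it installs.
 */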
#define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
			"%02x%02x:%02x%02x:%02x%02x:%02x%02x"
#define IPv6_BYTES(addr) \
	addr[0], addr[1], addr[2], addr[3], \
	addr[4], addr[5], addr[6], addr[7], \
	addr[8], addr[9], addr[10], addr[11],\
	addr[12], addr[13], addr[14], addr[15]

#define IPV6_ADDR_LEN 16
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

static int rx_queue_per_lcore = 1;
struct mbuf_table {
	uint32_t len;
	uint32_t head;
	uint32_t tail;
	struct rte_mbuf *m_table[0];
};

struct rx_queue {
	struct rte_ip_frag_tbl *frag_tbl;
	struct rte_mempool *pool;
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	uint16_t portid;
};

struct tx_lcore_stat {
	uint64_t call;
	uint64_t drop;
	uint64_t queue;
	uint64_t send;
};
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
#define MAX_RX_QUEUE_PER_PORT 128
struct lcore_queue_conf {
	uint16_t n_rx_queue;
	struct rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct rte_ip_frag_death_row death_row;
	struct mbuf_table *tx_mbufs[RTE_MAX_ETHPORTS];
	struct tx_lcore_stat tx_stat;
} __rte_cache_aligned;
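/*
 * One queue configuration per lcore. The structure is cache-line aligned
 * so that the per-lcore state of different cores never shares a cache
 * line (avoids false sharing between polling lcores).
 */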
static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_RSS,
		.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 1, /**< Jumbo Frame Support enabled */
		.hw_strip_crc   = 1, /**< CRC stripped by hardware */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IP,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};
/*
 * IPv4 forwarding table
 */
struct l3fwd_ipv4_route {
	uint32_t ip;
	uint8_t  depth;
	uint8_t  if_out;
};

struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
	{IPv4(100,10,0,0), 16, 0},
	{IPv4(100,20,0,0), 16, 1},
	{IPv4(100,30,0,0), 16, 2},
	{IPv4(100,40,0,0), 16, 3},
	{IPv4(100,50,0,0), 16, 4},
	{IPv4(100,60,0,0), 16, 5},
	{IPv4(100,70,0,0), 16, 6},
	{IPv4(100,80,0,0), 16, 7},
};
/*
 * IPv6 forwarding table
 */
struct l3fwd_ipv6_route {
	uint8_t ip[IPV6_ADDR_LEN];
	uint8_t depth;
	uint8_t if_out;
};

static struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = {
	{{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
	{{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
	{{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
	{{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
	{{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
	{{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
	{{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
	{{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
};
#define LPM_MAX_RULES         1024
#define LPM6_MAX_RULES        1024
#define LPM6_NUMBER_TBL8S (1 << 16)

struct rte_lpm6_config lpm6_config = {
	.max_rules = LPM6_MAX_RULES,
	.number_tbl8s = LPM6_NUMBER_TBL8S,
	.flags = 0,
};
static struct rte_lpm *socket_lpm[RTE_MAX_NUMA_NODES];
static struct rte_lpm6 *socket_lpm6[RTE_MAX_NUMA_NODES];
#ifdef RTE_LIBRTE_IP_FRAG_TBL_STAT
#define TX_LCORE_STAT_UPDATE(s, f, v)	((s)->f += (v))
#else
#define TX_LCORE_STAT_UPDATE(s, f, v)	do {} while (0)
#endif /* RTE_LIBRTE_IP_FRAG_TBL_STAT */
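/*
 * For example, TX_LCORE_STAT_UPDATE(&qconf->tx_stat, call, 1) expands to
 * qconf->tx_stat.call += 1 when RTE_LIBRTE_IP_FRAG_TBL_STAT is defined,
 * and to a no-op otherwise, so the fast path pays nothing when the
 * statistics are compiled out.
 */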
/*
 * If number of queued packets reached given threshold, then
 * send burst of packets on an output interface.
 */
static inline uint32_t
send_burst(struct lcore_queue_conf *qconf, uint32_t thresh, uint16_t port)
{
	uint32_t fill, len, k, n;
	struct mbuf_table *txmb;

	txmb = qconf->tx_mbufs[port];
	len = txmb->len;

	/* Fill level of the circular TX queue, handling head/tail wrap. */
	if ((int32_t)(fill = txmb->head - txmb->tail) < 0)
		fill += len;

	if (fill >= thresh) {
		n = RTE_MIN(len - txmb->tail, fill);

		k = rte_eth_tx_burst(port, qconf->tx_queue_id[port],
			txmb->m_table + txmb->tail, (uint16_t)n);

		TX_LCORE_STAT_UPDATE(&qconf->tx_stat, call, 1);
		TX_LCORE_STAT_UPDATE(&qconf->tx_stat, send, k);

		fill -= k;
		if ((txmb->tail += k) == len)
			txmb->tail = 0;
	}

	return fill;
}
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
send_single_packet(struct rte_mbuf *m, uint16_t port)
{
	uint32_t fill, lcore_id, len;
	struct lcore_queue_conf *qconf;
	struct mbuf_table *txmb;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	txmb = qconf->tx_mbufs[port];
	len = txmb->len;

	fill = send_burst(qconf, MAX_PKT_BURST, port);

	/* Queue still full after the send: drop the oldest queued packet. */
	if (fill == len - 1) {
		TX_LCORE_STAT_UPDATE(&qconf->tx_stat, drop, 1);
		rte_pktmbuf_free(txmb->m_table[txmb->tail]);
		if (++txmb->tail == len)
			txmb->tail = 0;
	}

	TX_LCORE_STAT_UPDATE(&qconf->tx_stat, queue, 1);
	txmb->m_table[txmb->head] = m;
	if (++txmb->head == len)
		txmb->head = 0;

	return 0;
}
static inline void
reassemble(struct rte_mbuf *m, uint16_t portid, uint32_t queue,
	struct lcore_queue_conf *qconf, uint64_t tms)
{
	struct ether_hdr *eth_hdr;
	struct rte_ip_frag_tbl *tbl;
	struct rte_ip_frag_death_row *dr;
	struct rx_queue *rxq;
	void *d_addr_bytes;
	uint32_t next_hop;
	uint16_t dst_port;

	rxq = &qconf->rx_queue_list[queue];

	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	dst_port = portid;

	/* if packet is IPv4 */
	if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
		struct ipv4_hdr *ip_hdr;
		uint32_t ip_dst;

		ip_hdr = (struct ipv4_hdr *)(eth_hdr + 1);

		/* if it is a fragmented packet, then try to reassemble. */
		if (rte_ipv4_frag_pkt_is_fragmented(ip_hdr)) {
			struct rte_mbuf *mo;

			tbl = rxq->frag_tbl;
			dr = &qconf->death_row;
			/* prepare mbuf: setup l2_len/l3_len. */
			m->l2_len = sizeof(*eth_hdr);
			m->l3_len = sizeof(*ip_hdr);

			/* process this fragment. */
			mo = rte_ipv4_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr);

			/* no packet to send out. */
			if (mo == NULL)
				return;

			/* we have our packet reassembled. */
			if (mo != m) {
				m = mo;
				eth_hdr = rte_pktmbuf_mtod(m,
					struct ether_hdr *);
				ip_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
			}
		}
		ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);
		/* Find destination port */
		if (rte_lpm_lookup(rxq->lpm, ip_dst, &next_hop) == 0 &&
				(enabled_port_mask & 1 << next_hop) != 0) {
			dst_port = next_hop;
		}

		eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4);
	} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
		/* if packet is IPv6 */
		struct ipv6_extension_fragment *frag_hdr;
		struct ipv6_hdr *ip_hdr;

		ip_hdr = (struct ipv6_hdr *)(eth_hdr + 1);

		frag_hdr = rte_ipv6_frag_get_ipv6_fragment_header(ip_hdr);
		if (frag_hdr != NULL) {
			struct rte_mbuf *mo;

			tbl = rxq->frag_tbl;
			dr = &qconf->death_row;
			/* prepare mbuf: setup l2_len/l3_len. */
			m->l2_len = sizeof(*eth_hdr);
			m->l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);
			mo = rte_ipv6_frag_reassemble_packet(tbl, dr, m, tms,
					ip_hdr, frag_hdr);
			if (mo == NULL)
				return;

			if (mo != m) {
				m = mo;
				eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
				ip_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
			}
		}
		/* Find destination port */
		if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
				&next_hop) == 0 &&
				(enabled_port_mask & 1 << next_hop) != 0) {
			dst_port = next_hop;
		}

		eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv6);
	}
	/* if packet wasn't IPv4 or IPv6, it's forwarded to the port it came from */

	/* 02:00:00:00:00:xx */
	d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
	*((uint64_t *)d_addr_bytes) = 0x000000000002 + ((uint64_t)dst_port << 40);
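	/*
	 * On a little-endian CPU the 64-bit store above puts 0x02 into
	 * d_addr.addr_bytes[0] and dst_port into addr_bytes[5], i.e.
	 * 02:00:00:00:00:<dst_port>. Two of the eight written bytes spill
	 * into s_addr, which is overwritten right below anyway.
	 */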
	/* src addr */
	ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);

	send_single_packet(m, dst_port);
}
/* main processing loop */
static int
main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t diff_tsc, cur_tsc, prev_tsc;
	int i, j, nb_rx;
	uint16_t portid;
	struct lcore_queue_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
		/ US_PER_S * BURST_TX_DRAIN_US;

	prev_tsc = 0;
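	/*
	 * drain_tsc is BURST_TX_DRAIN_US converted to TSC cycles. For
	 * example, with a 2 GHz TSC: ceil(2e9 / 1e6) * 100 = 200,000
	 * cycles, i.e. ~100 us between TX drains.
	 */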
	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, IP_RSMBL, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, IP_RSMBL, "entering main loop on lcore %u\n", lcore_id);
	for (i = 0; i < qconf->n_rx_queue; i++) {

		portid = qconf->rx_queue_list[i].portid;
		RTE_LOG(INFO, IP_RSMBL, " -- lcoreid=%u portid=%u\n", lcore_id,
			portid);
	}

	while (1) {
		cur_tsc = rte_rdtsc();
		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			/*
			 * This could be optimized (use queueid instead of
			 * portid), but it is not called so often
			 */
			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
				if ((enabled_port_mask & (1 << portid)) != 0)
					send_burst(qconf, 1, portid);
			}

			prev_tsc = cur_tsc;
		}
		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; ++i) {

			portid = qconf->rx_queue_list[i].portid;

			nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
				MAX_PKT_BURST);
			/* Prefetch first packets */
			for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
				rte_prefetch0(rte_pktmbuf_mtod(
						pkts_burst[j], void *));
			}
			/* Prefetch and forward already prefetched packets */
			for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
				rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
						j + PREFETCH_OFFSET], void *));
				reassemble(pkts_burst[j], portid,
					i, qconf, cur_tsc);
			}
			/* Forward remaining prefetched packets */
			for (; j < nb_rx; j++) {
				reassemble(pkts_burst[j], portid,
					i, qconf, cur_tsc);
			}

			rte_ip_frag_free_death_row(&qconf->death_row,
				PREFETCH_OFFSET);
		}
	}
}
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]"
		" [--max-pkt-len PKTLEN]"
		" [--maxflows=<flows>] [--flowttl=<ttl>[(s|ms)]]\n"
		" -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		" -q NQ: number of RX queues per lcore\n"
		" --maxflows=<flows>: optional, maximum number of flows "
		"supported\n"
		" --flowttl=<ttl>[(s|ms)]: optional, maximum TTL for each "
		"flow\n",
		prgname);
}
static int
parse_flow_num(const char *str, uint32_t min, uint32_t max, uint32_t *val)
{
	char *end;
	uint64_t v;

	/* parse decimal string */
	errno = 0;
	v = strtoul(str, &end, 10);
	if (errno != 0 || *end != '\0')
		return -EINVAL;

	if (v < min || v > max)
		return -EINVAL;

	*val = (uint32_t)v;
	return 0;
}
static int
parse_flow_ttl(const char *str, uint32_t min, uint32_t max, uint32_t *val)
{
	char *end;
	uint64_t v;

	static const char frmt_sec[] = "s";
	static const char frmt_msec[] = "ms";

	/* parse decimal string */
	errno = 0;
	v = strtoul(str, &end, 10);
	if (errno != 0)
		return -EINVAL;

	if (*end != '\0') {
		if (strncmp(frmt_sec, end, sizeof(frmt_sec)) == 0)
			v *= MS_PER_S;
		else if (strncmp(frmt_msec, end, sizeof(frmt_msec)) != 0)
			return -EINVAL;
	}

	if (v < min || v > max)
		return -EINVAL;

	*val = (uint32_t)v;
	return 0;
}
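/*
 * parse_flow_ttl() accepts e.g. "10s" or "10000ms": both yield 10000,
 * since flow TTL values are kept in milliseconds internally.
 */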
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}
static int
parse_nqueue(const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	printf("%p\n", q_arg);

	/* parse decimal string */
	n = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n == 0)
		return -1;
	if (n >= MAX_RX_QUEUE_PER_LCORE)
		return -1;

	return n;
}
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt;
	char **argvopt;
	int option_index;
	int ret;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{"max-pkt-len", 1, 0, 0},
		{"maxflows", 1, 0, 0},
		{"flowttl", 1, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;
	while ((opt = getopt_long(argc, argvopt, "p:q:",
				lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;

		/* nqueue */
		case 'q':
			rx_queue_per_lcore = parse_nqueue(optarg);
			if (rx_queue_per_lcore < 0) {
				printf("invalid queue number\n");
				print_usage(prgname);
				return -1;
			}
			break;
		/* long options */
		case 0:
			if (!strncmp(lgopts[option_index].name,
					"maxflows", 8)) {
				if ((ret = parse_flow_num(optarg, MIN_FLOW_NUM,
						MAX_FLOW_NUM,
						&max_flow_num)) != 0) {
					printf("invalid value: \"%s\" for "
						"parameter %s\n",
						optarg,
						lgopts[option_index].name);
					print_usage(prgname);
					return ret;
				}
			}

			if (!strncmp(lgopts[option_index].name, "flowttl", 7)) {
				if ((ret = parse_flow_ttl(optarg, MIN_FLOW_TTL,
						MAX_FLOW_TTL,
						&max_flow_ttl)) != 0) {
					printf("invalid value: \"%s\" for "
						"parameter %s\n",
						optarg,
						lgopts[option_index].name);
					print_usage(prgname);
					return ret;
				}
			}

			break;

		default:
			print_usage(prgname);
			return -1;
		}
	}
	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 1; /* reset getopt lib */
	return ret;
}
static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];
	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf(
					"Port%d Link Up. Speed %u Mbps - %s\n",
						portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("\ndone\n");
		}
	}
}
static int
init_routing_table(void)
{
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	int socket, ret;
	unsigned i;

	for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
		if (socket_lpm[socket]) {
			lpm = socket_lpm[socket];
			/* populate the LPM table */
			for (i = 0; i < RTE_DIM(l3fwd_ipv4_route_array); i++) {
				ret = rte_lpm_add(lpm,
					l3fwd_ipv4_route_array[i].ip,
					l3fwd_ipv4_route_array[i].depth,
					l3fwd_ipv4_route_array[i].if_out);

				if (ret < 0) {
					RTE_LOG(ERR, IP_RSMBL, "Unable to add entry %i to the l3fwd "
						"LPM table\n", i);
					return -1;
				}

				RTE_LOG(INFO, IP_RSMBL, "Socket %i: adding route " IPv4_BYTES_FMT
						"/%d (port %d)\n",
					socket,
					IPv4_BYTES(l3fwd_ipv4_route_array[i].ip),
					l3fwd_ipv4_route_array[i].depth,
					l3fwd_ipv4_route_array[i].if_out);
			}
		}
		if (socket_lpm6[socket]) {
			lpm6 = socket_lpm6[socket];
			/* populate the LPM6 table */
			for (i = 0; i < RTE_DIM(l3fwd_ipv6_route_array); i++) {
				ret = rte_lpm6_add(lpm6,
					l3fwd_ipv6_route_array[i].ip,
					l3fwd_ipv6_route_array[i].depth,
					l3fwd_ipv6_route_array[i].if_out);

				if (ret < 0) {
					RTE_LOG(ERR, IP_RSMBL, "Unable to add entry %i to the l3fwd "
						"LPM6 table\n", i);
					return -1;
				}

				RTE_LOG(INFO, IP_RSMBL, "Socket %i: adding route " IPv6_BYTES_FMT
						"/%d (port %d)\n",
					socket,
					IPv6_BYTES(l3fwd_ipv6_route_array[i].ip),
					l3fwd_ipv6_route_array[i].depth,
					l3fwd_ipv6_route_array[i].if_out);
			}
		}
	}
	return 0;
}
static int
setup_port_tbl(struct lcore_queue_conf *qconf, uint32_t lcore, int socket,
	uint32_t port)
{
	struct mbuf_table *mtb;
	uint32_t n;
	size_t sz;

	n = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST);
	sz = sizeof(*mtb) + sizeof(mtb->m_table[0]) * n;

	if ((mtb = rte_zmalloc_socket(__func__, sz, RTE_CACHE_LINE_SIZE,
			socket)) == NULL) {
		RTE_LOG(ERR, IP_RSMBL, "%s() for lcore: %u, port: %u "
			"failed to allocate %zu bytes\n",
			__func__, lcore, port, sz);
		return -1;
	}

	mtb->len = n;
	qconf->tx_mbufs[port] = mtb;

	return 0;
}
static int
setup_queue_tbl(struct rx_queue *rxq, uint32_t lcore, uint32_t queue)
{
	int socket;
	uint32_t nb_mbuf;
	uint64_t frag_cycles;
	char buf[RTE_MEMPOOL_NAMESIZE];

	socket = rte_lcore_to_socket_id(lcore);
	if (socket == SOCKET_ID_ANY)
		socket = 0;

	frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S *
		max_flow_ttl;
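	/*
	 * The expression above converts max_flow_ttl from ms to TSC cycles:
	 * cycles_per_ms * ttl_ms. E.g. with a 2 GHz TSC and the default
	 * TTL of 1000 ms, frag_cycles = 2,000,000 * 1000 = 2e9 (~1 s).
	 */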
	if ((rxq->frag_tbl = rte_ip_frag_table_create(max_flow_num,
			IP_FRAG_TBL_BUCKET_ENTRIES, max_flow_num, frag_cycles,
			socket)) == NULL) {
		RTE_LOG(ERR, IP_RSMBL, "ip_frag_tbl_create(%u) on "
			"lcore: %u for queue: %u failed\n",
			max_flow_num, lcore, queue);
		return -1;
	}
	/*
	 * At any given moment up to <max_flow_num * (MAX_FRAG_NUM)>
	 * mbufs could be stored in the fragment table.
	 * Plus, each TX queue can hold up to <max_flow_num> packets.
	 */

	nb_mbuf = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) * MAX_FRAG_NUM;
	nb_mbuf *= (port_conf.rxmode.max_rx_pkt_len + BUF_SIZE - 1) / BUF_SIZE;
	nb_mbuf *= 2; /* ipv4 and ipv6 */
	nb_mbuf += nb_rxd + nb_txd;

	nb_mbuf = RTE_MAX(nb_mbuf, (uint32_t)NB_MBUF);
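	/*
	 * Worked example with the defaults, assuming MAX_FRAG_NUM == 4 and
	 * BUF_SIZE == 2048: max(4096, 64) * 4 = 16384 mbufs; times
	 * ceil(9728 / 2048) = 5 gives 81920; times 2 (IPv4 + IPv6) gives
	 * 163840; plus 128 RX + 512 TX descriptors.
	 */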
	snprintf(buf, sizeof(buf), "mbuf_pool_%u_%u", lcore, queue);

	rxq->pool = rte_pktmbuf_pool_create(buf, nb_mbuf, MEMPOOL_CACHE_SIZE, 0,
		MBUF_DATA_SIZE, socket);
	if (rxq->pool == NULL) {
		RTE_LOG(ERR, IP_RSMBL,
			"rte_pktmbuf_pool_create(%s) failed\n", buf);
		return -1;
	}

	return 0;
}
static int
init_mem(void)
{
	char buf[PATH_MAX];
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	struct rte_lpm_config lpm_config;
	int socket;
	unsigned lcore_id;

	/* traverse through lcores and initialize structures on each socket */

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {

		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		socket = rte_lcore_to_socket_id(lcore_id);

		if (socket == SOCKET_ID_ANY)
			socket = 0;
		if (socket_lpm[socket] == NULL) {
			RTE_LOG(INFO, IP_RSMBL, "Creating LPM table on socket %i\n", socket);
			snprintf(buf, sizeof(buf), "IP_RSMBL_LPM_%i", socket);

			lpm_config.max_rules = LPM_MAX_RULES;
			lpm_config.number_tbl8s = 256;
			lpm_config.flags = 0;

			lpm = rte_lpm_create(buf, socket, &lpm_config);
			if (lpm == NULL) {
				RTE_LOG(ERR, IP_RSMBL, "Cannot create LPM table\n");
				return -1;
			}
			socket_lpm[socket] = lpm;
		}
		if (socket_lpm6[socket] == NULL) {
			RTE_LOG(INFO, IP_RSMBL, "Creating LPM6 table on socket %i\n", socket);
			snprintf(buf, sizeof(buf), "IP_RSMBL_LPM6_%i", socket);

			lpm6 = rte_lpm6_create(buf, socket, &lpm6_config);
			if (lpm6 == NULL) {
				RTE_LOG(ERR, IP_RSMBL, "Cannot create LPM6 table\n");
				return -1;
			}
			socket_lpm6[socket] = lpm6;
		}
	}

	return 0;
}
static void
queue_dump_stat(void)
{
	uint32_t i, lcore;
	const struct lcore_queue_conf *qconf;

	for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
		if (rte_lcore_is_enabled(lcore) == 0)
			continue;

		qconf = &lcore_queue_conf[lcore];
		for (i = 0; i < qconf->n_rx_queue; i++) {

			fprintf(stdout, " -- lcoreid=%u portid=%u "
				"frag tbl stat:\n",
				lcore, qconf->rx_queue_list[i].portid);
			rte_ip_frag_table_statistics_dump(stdout,
					qconf->rx_queue_list[i].frag_tbl);
			fprintf(stdout, "TX bursts:\t%" PRIu64 "\n"
				"TX packets queued:\t%" PRIu64 "\n"
				"TX packets dropped:\t%" PRIu64 "\n"
				"TX packets sent:\t%" PRIu64 "\n",
				qconf->tx_stat.call,
				qconf->tx_stat.queue,
				qconf->tx_stat.drop,
				qconf->tx_stat.send);
		}
	}
}
static void
signal_handler(int signum)
{
	queue_dump_stat();
	if (signum != SIGUSR1)
		rte_exit(0, "received signal: %d, exiting\n", signum);
}
int
main(int argc, char **argv)
{
	struct lcore_queue_conf *qconf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf *txconf;
	struct rx_queue *rxq;
	int ret, socket;
	unsigned nb_ports;
	uint16_t queueid;
	unsigned lcore_id = 0, rx_lcore_id = 0;
	uint32_t n_tx_queue, nb_lcores;
	uint16_t portid;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;
	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid IP reassembly parameters\n");

	nb_ports = rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No ports found!\n");

	nb_lcores = rte_lcore_count();

	/* initialize structures (mempools, lpm etc.) */
	if (init_mem() < 0)
		rte_panic("Cannot initialize memory structures!\n");
	/* check if portmask has non-existent ports */
	if (enabled_port_mask & ~(RTE_LEN2MASK(nb_ports, unsigned)))
		rte_exit(EXIT_FAILURE, "Non-existent ports in portmask!\n");
	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}

		qconf = &lcore_queue_conf[rx_lcore_id];

		/* limit the frame size to the maximum supported by NIC */
		rte_eth_dev_info_get(portid, &dev_info);
		port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
			dev_info.max_rx_pktlen, port_conf.rxmode.max_rx_pkt_len);
		/* get the lcore_id for this port */
		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
				qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {

			rx_lcore_id++;
			if (rx_lcore_id >= RTE_MAX_LCORE)
				rte_exit(EXIT_FAILURE, "Not enough cores\n");

			qconf = &lcore_queue_conf[rx_lcore_id];
		}
		socket = rte_eth_dev_socket_id(portid);
		if (socket == SOCKET_ID_ANY)
			socket = 0;

		queueid = qconf->n_rx_queue;
		rxq = &qconf->rx_queue_list[queueid];
		rxq->portid = portid;
		rxq->lpm = socket_lpm[socket];
		rxq->lpm6 = socket_lpm6[socket];
		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
				&nb_txd);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot adjust number of descriptors: err=%d, port=%d\n",
				ret, portid);

		if (setup_queue_tbl(rxq, rx_lcore_id, queueid) < 0)
			rte_exit(EXIT_FAILURE, "Failed to set up queue table\n");
		qconf->n_rx_queue++;
		/* init port */
		printf("Initializing port %d ... ", portid);
		fflush(stdout);

		n_tx_queue = nb_lcores;
		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
		ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue,
			&port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot configure device: "
				"err=%d, port=%d\n",
				ret, portid);
		/* init one RX queue */
		ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
			socket, NULL,
			rxq->pool);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
				"err=%d, port=%d\n",
				ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf("\n");
		/* init one TX queue per couple (lcore,port) */
		queueid = 0;
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;

			socket = (int) rte_lcore_to_socket_id(lcore_id);

			printf("txq=%u,%d,%d ", lcore_id, queueid, socket);
			fflush(stdout);

			txconf = &dev_info.default_txconf;
			txconf->txq_flags = 0;

			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
					socket, txconf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);

			qconf = &lcore_queue_conf[lcore_id];
			qconf->tx_queue_id[portid] = queueid;
			setup_port_tbl(qconf, lcore_id, socket, portid);
			queueid++;
		}
		printf("\n");
	}

	printf("\n");
	/* start ports */
	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0) {
			continue;
		}
		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
				ret, portid);

		rte_eth_promiscuous_enable(portid);
	}
	if (init_routing_table() < 0)
		rte_exit(EXIT_FAILURE, "Cannot init routing table\n");

	check_all_ports_link_status(nb_ports, enabled_port_mask);

	signal(SIGUSR1, signal_handler);
	signal(SIGTERM, signal_handler);
	signal(SIGINT, signal_handler);
	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}