/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <errno.h>
#include <getopt.h>
#include <signal.h>
#include <sys/queue.h>
#include <sys/param.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_string_fns.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include <rte_ip_frag.h>
#define MAX_PKT_BURST 32

#define RTE_LOGTYPE_IP_RSMBL RTE_LOGTYPE_USER1

#define MAX_JUMBO_PKT_LEN 9600

#define BUF_SIZE	RTE_MBUF_DEFAULT_DATAROOM
#define MBUF_DATA_SIZE	RTE_MBUF_DEFAULT_BUF_SIZE

#define NB_MBUF 8192
#define MEMPOOL_CACHE_SIZE 256

/* allow max jumbo frame 9.5 KB */
#define JUMBO_FRAME_MAX_SIZE	0x2600

#define MAX_FLOW_NUM	UINT16_MAX
#define MIN_FLOW_NUM	1
#define DEF_FLOW_NUM	0x1000

/* TTL numbers are in ms. */
#define MAX_FLOW_TTL	(3600 * MS_PER_S)
#define MIN_FLOW_TTL	1
#define DEF_FLOW_TTL	MS_PER_S

#define MAX_FRAG_NUM RTE_LIBRTE_IP_FRAG_MAX_FRAG

/* Should be power of two. */
#define IP_FRAG_TBL_BUCKET_ENTRIES	16

static uint32_t max_flow_num = DEF_FLOW_NUM;
static uint32_t max_flow_ttl = DEF_FLOW_TTL;

#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
/* Configure how many packets ahead to prefetch when reading packets */
#define PREFETCH_OFFSET	3
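/*
 * PREFETCH_OFFSET implements a small software pipeline: while packet j is
 * being processed, packet j + PREFETCH_OFFSET is prefetched into cache
 * (see the RX loop in main_loop()), so header reads rarely stall on DRAM.
 */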
/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 1024

static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/* ethernet addresses of ports */
static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

#define IPv4_BYTES_FMT "%" PRIu8 ".%" PRIu8 ".%" PRIu8 ".%" PRIu8
#define IPv4_BYTES(addr) \
	(uint8_t)(((addr) >> 24) & 0xFF), \
	(uint8_t)(((addr) >> 16) & 0xFF), \
	(uint8_t)(((addr) >> 8) & 0xFF), \
	(uint8_t)((addr) & 0xFF)

#define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:" \
		       "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
#define IPv6_BYTES(addr) \
	addr[0], addr[1], addr[2], addr[3], \
	addr[4], addr[5], addr[6], addr[7], \
	addr[8], addr[9], addr[10], addr[11], \
	addr[12], addr[13], addr[14], addr[15]

#define IPV6_ADDR_LEN 16
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

static int rx_queue_per_lcore = 1;

struct mbuf_table {
	uint32_t len;
	uint32_t head;
	uint32_t tail;
	struct rte_mbuf *m_table[0];
};

struct rx_queue {
	struct rte_ip_frag_tbl *frag_tbl;
	struct rte_mempool *pool;
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	uint16_t portid;
};
struct tx_lcore_stat {
	uint64_t call;
	uint64_t drop;
	uint64_t queue;
	uint64_t send;
};
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
#define MAX_RX_QUEUE_PER_PORT 128

struct lcore_queue_conf {
	uint16_t n_rx_queue;
	struct rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct rte_ip_frag_death_row death_row;
	struct mbuf_table *tx_mbufs[RTE_MAX_ETHPORTS];
	struct tx_lcore_stat tx_stat;
} __rte_cache_aligned;
static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
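/*
 * Each lcore works only on its own lcore_queue_conf entry, so no locking is
 * needed; __rte_cache_aligned keeps entries on separate cache lines and
 * avoids false sharing between cores.
 */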
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = ETH_MQ_RX_RSS,
		.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
		.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
			     DEV_RX_OFFLOAD_JUMBO_FRAME),
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IP,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
			     DEV_TX_OFFLOAD_MULTI_SEGS),
	},
};
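/*
 * DEV_TX_OFFLOAD_MULTI_SEGS is required because a reassembled packet is a
 * chain of mbuf segments (one per fragment); DEV_TX_OFFLOAD_IPV4_CKSUM lets
 * the NIC recompute the IPv4 header checksum that reassembly invalidates
 * (see the PKT_TX_IP_CKSUM flag set in reassemble()).
 */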
/*
 * IPv4 forwarding table
 */
struct l3fwd_ipv4_route {
	uint32_t ip;
	uint8_t depth;
	uint8_t if_out;
};

struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
	{RTE_IPV4(100, 10, 0, 0), 16, 0},
	{RTE_IPV4(100, 20, 0, 0), 16, 1},
	{RTE_IPV4(100, 30, 0, 0), 16, 2},
	{RTE_IPV4(100, 40, 0, 0), 16, 3},
	{RTE_IPV4(100, 50, 0, 0), 16, 4},
	{RTE_IPV4(100, 60, 0, 0), 16, 5},
	{RTE_IPV4(100, 70, 0, 0), 16, 6},
	{RTE_IPV4(100, 80, 0, 0), 16, 7},
};
/*
 * IPv6 forwarding table
 */
struct l3fwd_ipv6_route {
	uint8_t ip[IPV6_ADDR_LEN];
	uint8_t depth;
	uint8_t if_out;
};

static struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = {
	{{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 0},
	{{2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 1},
	{{3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 2},
	{{4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 3},
	{{5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 4},
	{{6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 5},
	{{7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 6},
	{{8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 7},
};
#define LPM_MAX_RULES		1024
#define LPM6_MAX_RULES		1024
#define LPM6_NUMBER_TBL8S	(1 << 16)

struct rte_lpm6_config lpm6_config = {
	.max_rules = LPM6_MAX_RULES,
	.number_tbl8s = LPM6_NUMBER_TBL8S,
	.flags = 0,
};
static struct rte_lpm *socket_lpm[RTE_MAX_NUMA_NODES];
static struct rte_lpm6 *socket_lpm6[RTE_MAX_NUMA_NODES];
#ifdef RTE_LIBRTE_IP_FRAG_TBL_STAT
#define TX_LCORE_STAT_UPDATE(s, f, v)	((s)->f += (v))
#else
#define TX_LCORE_STAT_UPDATE(s, f, v)	do {} while (0)
#endif /* RTE_LIBRTE_IP_FRAG_TBL_STAT */
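/*
 * TX statistics compile to no-ops unless RTE_LIBRTE_IP_FRAG_TBL_STAT is
 * defined at build time, so a default build pays nothing for them on the
 * fast path.
 */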
/*
 * If the number of queued packets has reached the given threshold, then
 * send a burst of packets out on the output interface.
 */
static inline uint32_t
send_burst(struct lcore_queue_conf *qconf, uint32_t thresh, uint16_t port)
{
	uint32_t fill, len, k, n;
	struct mbuf_table *txmb;

	txmb = qconf->tx_mbufs[port];
	len = txmb->len;

	if ((int32_t)(fill = txmb->head - txmb->tail) < 0)
		fill += len;

	if (fill >= thresh) {
		n = RTE_MIN(len - txmb->tail, fill);

		k = rte_eth_tx_burst(port, qconf->tx_queue_id[port],
			txmb->m_table + txmb->tail, (uint16_t)n);

		TX_LCORE_STAT_UPDATE(&qconf->tx_stat, call, 1);
		TX_LCORE_STAT_UPDATE(&qconf->tx_stat, send, k);

		fill -= k;
		if ((txmb->tail += k) == len)
			txmb->tail = 0;
	}

	return fill;
}
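/*
 * tx_mbufs is managed as a single-producer/single-consumer ring: head is the
 * enqueue index and tail the dequeue index. The (int32_t) cast in send_burst()
 * turns a wrapped-around head - tail difference back into the correct fill
 * level, and only the contiguous run up to the end of the array is handed to
 * rte_eth_tx_burst() in one call.
 */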
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
send_single_packet(struct rte_mbuf *m, uint16_t port)
{
	uint32_t fill, lcore_id, len;
	struct lcore_queue_conf *qconf;
	struct mbuf_table *txmb;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	txmb = qconf->tx_mbufs[port];
	len = txmb->len;

	fill = send_burst(qconf, MAX_PKT_BURST, port);

	if (fill == len - 1) {
		TX_LCORE_STAT_UPDATE(&qconf->tx_stat, drop, 1);
		rte_pktmbuf_free(txmb->m_table[txmb->tail]);
		if (++txmb->tail == len)
			txmb->tail = 0;
	}

	TX_LCORE_STAT_UPDATE(&qconf->tx_stat, queue, 1);
	txmb->m_table[txmb->head] = m;
	if (++txmb->head == len)
		txmb->head = 0;

	return 0;
}
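/*
 * Per-packet work: if the mbuf is an IPv4/IPv6 fragment, feed it to the
 * fragment table; a non-NULL return means reassembly completed and yields
 * the whole packet. The (possibly reassembled) packet is then routed via
 * LPM/LPM6, its Ethernet addresses are rewritten, and it is queued for TX.
 * Anything that is neither IPv4 nor IPv6 is sent back out on the port it
 * arrived on.
 */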
static inline void
reassemble(struct rte_mbuf *m, uint16_t portid, uint32_t queue,
	struct lcore_queue_conf *qconf, uint64_t tms)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ip_frag_tbl *tbl;
	struct rte_ip_frag_death_row *dr;
	struct rx_queue *rxq;
	void *d_addr_bytes;
	uint32_t next_hop;
	uint16_t dst_port;

	rxq = &qconf->rx_queue_list[queue];

	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	dst_port = portid;

	/* if packet is IPv4 */
	if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
		struct rte_ipv4_hdr *ip_hdr;
		uint32_t ip_dst;

		ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);

		/* if it is a fragmented packet, then try to reassemble. */
		if (rte_ipv4_frag_pkt_is_fragmented(ip_hdr)) {
			struct rte_mbuf *mo;

			tbl = rxq->frag_tbl;
			dr = &qconf->death_row;

			/* prepare mbuf: setup l2_len/l3_len. */
			m->l2_len = sizeof(*eth_hdr);
			m->l3_len = sizeof(*ip_hdr);

			/* process this fragment. */
			mo = rte_ipv4_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr);
			if (mo == NULL)
				/* no packet to send out. */
				return;

			/* we have our packet reassembled. */
			if (mo != m) {
				m = mo;
				eth_hdr = rte_pktmbuf_mtod(m,
					struct rte_ether_hdr *);
				ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
			}

			/* update offloading flags */
			m->ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM);
		}
		ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);

		/* Find destination port */
		if (rte_lpm_lookup(rxq->lpm, ip_dst, &next_hop) == 0 &&
				(enabled_port_mask & 1 << next_hop) != 0) {
			dst_port = next_hop;
		}

		eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
	} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
		/* if packet is IPv6 */
		struct ipv6_extension_fragment *frag_hdr;
		struct rte_ipv6_hdr *ip_hdr;

		ip_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);

		frag_hdr = rte_ipv6_frag_get_ipv6_fragment_header(ip_hdr);

		if (frag_hdr != NULL) {
			struct rte_mbuf *mo;

			tbl = rxq->frag_tbl;
			dr = &qconf->death_row;

			/* prepare mbuf: setup l2_len/l3_len. */
			m->l2_len = sizeof(*eth_hdr);
			m->l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);

			mo = rte_ipv6_frag_reassemble_packet(tbl, dr, m, tms,
				ip_hdr, frag_hdr);
			if (mo == NULL)
				return;

			if (mo != m) {
				m = mo;
				eth_hdr = rte_pktmbuf_mtod(m,
					struct rte_ether_hdr *);
				ip_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
			}
		}

		/* Find destination port */
		if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
				&next_hop) == 0 &&
				(enabled_port_mask & 1 << next_hop) != 0) {
			dst_port = next_hop;
		}

		eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
	}
	/* if packet wasn't IPv4 or IPv6, it's forwarded to the port it came from */

	/* dst addr: 02:00:00:00:00:xx, where xx is the output port id */
	d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
	*((uint64_t *)d_addr_bytes) = 0x000000000002 +
		((uint64_t)dst_port << 40);

	/* src addr */
	rte_ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);

	send_single_packet(m, dst_port);
}
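/*
 * Note that reassemble() never frees fragments itself: mbufs that the
 * fragment table is done with are parked on the per-lcore death row and
 * released in bulk by rte_ip_frag_free_death_row() at the end of each RX
 * iteration in main_loop().
 */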
/* main processing loop */
static int
main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t diff_tsc, cur_tsc, prev_tsc;
	int i, j, nb_rx;
	uint16_t portid;
	struct lcore_queue_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
		US_PER_S * BURST_TX_DRAIN_US;
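	/*
	 * drain_tsc converts BURST_TX_DRAIN_US into TSC cycles, rounding the
	 * cycles-per-microsecond figure up. For example, with a 2 GHz TSC:
	 * (2e9 + 1e6 - 1) / 1e6 * 100 = 2000 * 100 = 200000 cycles per 100 us.
	 */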
	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, IP_RSMBL, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, IP_RSMBL, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {

		portid = qconf->rx_queue_list[i].portid;
		RTE_LOG(INFO, IP_RSMBL, " -- lcoreid=%u portid=%u\n", lcore_id,
			portid);
	}

	while (1) {

		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			/*
			 * This could be optimized (use queueid instead of
			 * portid), but it is not called so often
			 */
			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
				if ((enabled_port_mask & (1 << portid)) != 0)
					send_burst(qconf, 1, portid);
			}

			prev_tsc = cur_tsc;
		}

		/*
		 * Read packets from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; ++i) {

			portid = qconf->rx_queue_list[i].portid;

			nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
				MAX_PKT_BURST);

			/* Prefetch first packets */
			for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
				rte_prefetch0(rte_pktmbuf_mtod(
					pkts_burst[j], void *));
			}

			/* Prefetch and forward already prefetched packets */
			for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
				rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
					j + PREFETCH_OFFSET], void *));
				reassemble(pkts_burst[j], portid,
					i, qconf, cur_tsc);
			}

			/* Forward remaining prefetched packets */
			for (; j < nb_rx; j++) {
				reassemble(pkts_burst[j], portid,
					i, qconf, cur_tsc);
			}

			rte_ip_frag_free_death_row(&qconf->death_row,
				PREFETCH_OFFSET);
		}
	}
}
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]"
		" [--max-pkt-len PKTLEN]"
		" [--maxflows=<flows>] [--flowttl=<ttl>[(s|ms)]]\n"
		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		"  -q NQ: number of RX queues per lcore\n"
		"  --maxflows=<flows>: optional, maximum number of flows "
		"that can be processed\n"
		"  --flowttl=<ttl>[(s|ms)]: optional, maximum TTL for each "
		"flow\n",
		prgname);
}
static int
parse_flow_num(const char *str, uint32_t min, uint32_t max, uint32_t *val)
{
	char *end;
	uint64_t v;

	/* parse decimal string */
	errno = 0;
	v = strtoul(str, &end, 10);
	if (errno != 0 || *end != '\0')
		return -EINVAL;

	if (v < min || v > max)
		return -EINVAL;

	*val = (uint32_t)v;
	return 0;
}

static int
parse_flow_ttl(const char *str, uint32_t min, uint32_t max, uint32_t *val)
{
	char *end;
	uint64_t v;

	static const char frmt_sec[] = "s";
	static const char frmt_msec[] = "ms";

	/* parse decimal string */
	errno = 0;
	v = strtoul(str, &end, 10);
	if (errno != 0)
		return -EINVAL;

	if (*end != '\0') {
		if (strncmp(frmt_sec, end, sizeof(frmt_sec)) == 0)
			v *= MS_PER_S;
		else if (strncmp(frmt_msec, end, sizeof(frmt_msec)) != 0)
			return -EINVAL;
	}

	if (v < min || v > max)
		return -EINVAL;

	*val = (uint32_t)v;
	return 0;
}
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}

static int
parse_nqueue(const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	/* parse decimal string */
	n = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n == 0)
		return -1;
	if (n >= MAX_RX_QUEUE_PER_LCORE)
		return -1;

	return n;
}
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{"max-pkt-len", 1, 0, 0},
		{"maxflows", 1, 0, 0},
		{"flowttl", 1, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:q:",
			lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;

		/* nqueue */
		case 'q':
			rx_queue_per_lcore = parse_nqueue(optarg);
			if (rx_queue_per_lcore < 0) {
				printf("invalid queue number\n");
				print_usage(prgname);
				return -1;
			}
			break;

		/* long options */
		case 0:
			if (!strncmp(lgopts[option_index].name,
					"maxflows", 8)) {
				if ((ret = parse_flow_num(optarg, MIN_FLOW_NUM,
						MAX_FLOW_NUM,
						&max_flow_num)) != 0) {
					printf("invalid value: \"%s\" for "
						"parameter %s\n",
						optarg,
						lgopts[option_index].name);
					print_usage(prgname);
					return ret;
				}
			}

			if (!strncmp(lgopts[option_index].name, "flowttl", 7)) {
				if ((ret = parse_flow_ttl(optarg, MIN_FLOW_TTL,
						MAX_FLOW_TTL,
						&max_flow_ttl)) != 0) {
					printf("invalid value: \"%s\" for "
						"parameter %s\n",
						optarg,
						lgopts[option_index].name);
					print_usage(prgname);
					return ret;
				}
			}

			break;

		/* unknown option */
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind - 1] = prgname;

	ret = optind - 1;
	optind = 1; /* reset getopt lib */
	return ret;
}
static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];

	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf(
					"Port%d Link Up. Speed %u Mbps - %s\n",
					portid, link.link_speed,
					(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("\ndone\n");
		}
	}
}
static int
init_routing_table(void)
{
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	int socket, ret;
	unsigned i;

	for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
		if (socket_lpm[socket]) {
			lpm = socket_lpm[socket];
			/* populate the LPM table */
			for (i = 0; i < RTE_DIM(l3fwd_ipv4_route_array); i++) {
				ret = rte_lpm_add(lpm,
					l3fwd_ipv4_route_array[i].ip,
					l3fwd_ipv4_route_array[i].depth,
					l3fwd_ipv4_route_array[i].if_out);

				if (ret < 0) {
					RTE_LOG(ERR, IP_RSMBL, "Unable to add entry %i to the l3fwd "
						"LPM table\n", i);
					return -1;
				}

				RTE_LOG(INFO, IP_RSMBL, "Socket %i: adding route " IPv4_BYTES_FMT
						"/%d (port %d)\n",
					socket,
					IPv4_BYTES(l3fwd_ipv4_route_array[i].ip),
					l3fwd_ipv4_route_array[i].depth,
					l3fwd_ipv4_route_array[i].if_out);
			}
		}

		if (socket_lpm6[socket]) {
			lpm6 = socket_lpm6[socket];
			/* populate the LPM6 table */
			for (i = 0; i < RTE_DIM(l3fwd_ipv6_route_array); i++) {
				ret = rte_lpm6_add(lpm6,
					l3fwd_ipv6_route_array[i].ip,
					l3fwd_ipv6_route_array[i].depth,
					l3fwd_ipv6_route_array[i].if_out);

				if (ret < 0) {
					RTE_LOG(ERR, IP_RSMBL, "Unable to add entry %i to the l3fwd "
						"LPM6 table\n", i);
					return -1;
				}

				RTE_LOG(INFO, IP_RSMBL, "Socket %i: adding route " IPv6_BYTES_FMT
						"/%d (port %d)\n",
					socket,
					IPv6_BYTES(l3fwd_ipv6_route_array[i].ip),
					l3fwd_ipv6_route_array[i].depth,
					l3fwd_ipv6_route_array[i].if_out);
			}
		}
	}
	return 0;
}
static void
setup_port_tbl(struct lcore_queue_conf *qconf, uint32_t lcore, int socket,
	uint32_t port)
{
	struct mbuf_table *mtb;
	uint32_t n;
	size_t sz;

	n = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST);
	sz = sizeof(*mtb) + sizeof(mtb->m_table[0]) * n;

	if ((mtb = rte_zmalloc_socket(__func__, sz, RTE_CACHE_LINE_SIZE,
			socket)) == NULL) {
		RTE_LOG(ERR, IP_RSMBL, "%s() for lcore: %u, port: %u "
			"failed to allocate %zu bytes\n",
			__func__, lcore, port, sz);
		rte_exit(EXIT_FAILURE, "mbuf table allocation failed\n");
	}

	mtb->len = n;
	qconf->tx_mbufs[port] = mtb;
}
static int
setup_queue_tbl(struct rx_queue *rxq, uint32_t lcore, uint32_t queue)
{
	int socket;
	uint32_t nb_mbuf;
	uint64_t frag_cycles;
	char buf[RTE_MEMPOOL_NAMESIZE];

	socket = rte_lcore_to_socket_id(lcore);
	if (socket == SOCKET_ID_ANY)
		socket = 0;

	frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S *
		max_flow_ttl;

	if ((rxq->frag_tbl = rte_ip_frag_table_create(max_flow_num,
			IP_FRAG_TBL_BUCKET_ENTRIES, max_flow_num, frag_cycles,
			socket)) == NULL) {
		RTE_LOG(ERR, IP_RSMBL, "ip_frag_tbl_create(%u) on "
			"lcore: %u for queue: %u failed\n",
			max_flow_num, lcore, queue);
		return -1;
	}

	/*
	 * At any given moment up to <max_flow_num * (MAX_FRAG_NUM)>
	 * mbufs could be stored in the fragment table.
	 * Plus, each TX queue can hold up to <max_flow_num> packets.
	 */

	nb_mbuf = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) * MAX_FRAG_NUM;
	nb_mbuf *= (port_conf.rxmode.max_rx_pkt_len + BUF_SIZE - 1) / BUF_SIZE;
	nb_mbuf *= 2; /* ipv4 and ipv6 */
	nb_mbuf += nb_rxd + nb_txd;

	nb_mbuf = RTE_MAX(nb_mbuf, (uint32_t)NB_MBUF);
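	/*
	 * Worked example, assuming the default RTE_LIBRTE_IP_FRAG_MAX_FRAG
	 * of 4 and the defaults above (max_flow_num = 0x1000 = 4096,
	 * BUF_SIZE = 2048, max_rx_pkt_len = 9728, nb_rxd = nb_txd = 1024):
	 * 4096 * 4 = 16384; ceil(9728 / 2048) = 5 segments per jumbo frame,
	 * so 16384 * 5 = 81920; doubled for IPv4 + IPv6 = 163840; plus the
	 * 2048 descriptors = 165888 mbufs, well above the NB_MBUF floor.
	 */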
	snprintf(buf, sizeof(buf), "mbuf_pool_%u_%u", lcore, queue);

	rxq->pool = rte_pktmbuf_pool_create(buf, nb_mbuf, MEMPOOL_CACHE_SIZE, 0,
		MBUF_DATA_SIZE, socket);
	if (rxq->pool == NULL) {
		RTE_LOG(ERR, IP_RSMBL,
			"rte_pktmbuf_pool_create(%s) failed", buf);
		return -1;
	}

	return 0;
}
static int
init_mem(void)
{
	char buf[PATH_MAX];
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	struct rte_lpm_config lpm_config;
	int socket;
	unsigned lcore_id;

	/* traverse through lcores and initialize structures on each socket */

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {

		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		socket = rte_lcore_to_socket_id(lcore_id);

		if (socket == SOCKET_ID_ANY)
			socket = 0;

		if (socket_lpm[socket] == NULL) {
			RTE_LOG(INFO, IP_RSMBL, "Creating LPM table on socket %i\n", socket);
			snprintf(buf, sizeof(buf), "IP_RSMBL_LPM_%i", socket);

			lpm_config.max_rules = LPM_MAX_RULES;
			lpm_config.number_tbl8s = 256;
			lpm_config.flags = 0;

			lpm = rte_lpm_create(buf, socket, &lpm_config);
			if (lpm == NULL) {
				RTE_LOG(ERR, IP_RSMBL, "Cannot create LPM table\n");
				return -1;
			}
			socket_lpm[socket] = lpm;
		}

		if (socket_lpm6[socket] == NULL) {
			RTE_LOG(INFO, IP_RSMBL, "Creating LPM6 table on socket %i\n", socket);
			snprintf(buf, sizeof(buf), "IP_RSMBL_LPM6_%i", socket);

			lpm6 = rte_lpm6_create(buf, socket, &lpm6_config);
			if (lpm6 == NULL) {
				RTE_LOG(ERR, IP_RSMBL, "Cannot create LPM6 table\n");
				return -1;
			}
			socket_lpm6[socket] = lpm6;
		}
	}

	return 0;
}
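/*
 * The LPM/LPM6 tables are created once per NUMA socket and shared by every
 * lcore on that socket; lookups are read-only on the fast path, so no
 * synchronization is needed after init_routing_table() has populated them.
 */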
static void
queue_dump_stat(void)
{
	uint32_t i, lcore;
	const struct lcore_queue_conf *qconf;

	for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
		if (rte_lcore_is_enabled(lcore) == 0)
			continue;

		qconf = &lcore_queue_conf[lcore];
		for (i = 0; i < qconf->n_rx_queue; i++) {

			fprintf(stdout, " -- lcoreid=%u portid=%u "
				"frag tbl stat:\n",
				lcore, qconf->rx_queue_list[i].portid);
			rte_ip_frag_table_statistics_dump(stdout,
				qconf->rx_queue_list[i].frag_tbl);
			fprintf(stdout, "TX bursts:\t%" PRIu64 "\n"
				"TX packets queued:\t%" PRIu64 "\n"
				"TX packets dropped:\t%" PRIu64 "\n"
				"TX packets sent:\t%" PRIu64 "\n",
				qconf->tx_stat.call,
				qconf->tx_stat.queue,
				qconf->tx_stat.drop,
				qconf->tx_stat.send);
		}
	}
}
static void
signal_handler(int signum)
{
	queue_dump_stat();
	if (signum != SIGUSR1)
		rte_exit(0, "received signal: %d, exiting\n", signum);
}
int
main(int argc, char **argv)
{
	struct lcore_queue_conf *qconf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf *txconf;
	struct rx_queue *rxq;
	int ret, socket;
	uint16_t queueid;
	unsigned lcore_id = 0, rx_lcore_id = 0;
	uint32_t n_tx_queue, nb_lcores;
	uint16_t nb_ports, portid;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid IP reassembly parameters\n");

	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No ports found!\n");

	nb_lcores = rte_lcore_count();

	/* initialize structures (mempools, lpm etc.) */
	if (init_mem() < 0)
		rte_panic("Cannot initialize memory structures!\n");

	/* check if portmask has non-existent ports */
	if (enabled_port_mask & ~(RTE_LEN2MASK(nb_ports, unsigned)))
		rte_exit(EXIT_FAILURE, "Non-existent ports in portmask!\n");
	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		struct rte_eth_rxconf rxq_conf;
		struct rte_eth_conf local_port_conf = port_conf;

		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}

		qconf = &lcore_queue_conf[rx_lcore_id];

		/* limit the frame size to the maximum supported by NIC */
		rte_eth_dev_info_get(portid, &dev_info);
		local_port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
			dev_info.max_rx_pktlen,
			local_port_conf.rxmode.max_rx_pkt_len);

		/* get the lcore_id for this port */
		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
				qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {

			rx_lcore_id++;
			if (rx_lcore_id >= RTE_MAX_LCORE)
				rte_exit(EXIT_FAILURE, "Not enough cores\n");

			qconf = &lcore_queue_conf[rx_lcore_id];
		}

		socket = rte_eth_dev_socket_id(portid);
		if (socket == SOCKET_ID_ANY)
			socket = 0;

		queueid = qconf->n_rx_queue;
		rxq = &qconf->rx_queue_list[queueid];
		rxq->portid = portid;
		rxq->lpm = socket_lpm[socket];
		rxq->lpm6 = socket_lpm6[socket];

		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
			&nb_txd);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot adjust number of descriptors: err=%d, port=%d\n",
				ret, portid);

		if (setup_queue_tbl(rxq, rx_lcore_id, queueid) < 0)
			rte_exit(EXIT_FAILURE, "Failed to set up queue table\n");
		qconf->n_rx_queue++;
1079 printf("Initializing port %d ... ", portid );
1082 n_tx_queue = nb_lcores;
1083 if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
1084 n_tx_queue = MAX_TX_QUEUE_PER_PORT;
1085 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1086 local_port_conf.txmode.offloads |=
1087 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1089 local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
1090 dev_info.flow_type_rss_offloads;
1091 if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
1092 port_conf.rx_adv_conf.rss_conf.rss_hf) {
1093 printf("Port %u modified RSS hash function based on hardware support,"
1094 "requested:%#"PRIx64" configured:%#"PRIx64"\n",
1096 port_conf.rx_adv_conf.rss_conf.rss_hf,
1097 local_port_conf.rx_adv_conf.rss_conf.rss_hf);
1100 ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue,
1104 rte_exit(EXIT_FAILURE, "Cannot configure device: "
1105 "err=%d, port=%d\n",
1109 /* init one RX queue */
1110 rxq_conf = dev_info.default_rxconf;
1111 rxq_conf.offloads = local_port_conf.rxmode.offloads;
1112 ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
1117 rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
1118 "err=%d, port=%d\n",
1122 rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
1123 print_ethaddr(" Address:", &ports_eth_addr[portid]);
		/* init one TX queue per couple (lcore,port) */
		queueid = 0;
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;

			socket = (int)rte_lcore_to_socket_id(lcore_id);

			printf("txq=%u,%d,%d ", lcore_id, queueid, socket);
			fflush(stdout);

			txconf = &dev_info.default_txconf;
			txconf->offloads = local_port_conf.txmode.offloads;

			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
				socket, txconf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);

			qconf = &lcore_queue_conf[lcore_id];
			qconf->tx_queue_id[portid] = queueid;
			setup_port_tbl(qconf, lcore_id, socket, portid);
			queueid++;
		}
		printf("\n");
	}

	printf("\n");
	/* start ports */
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0) {
			continue;
		}
		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
				ret, portid);

		rte_eth_promiscuous_enable(portid);
	}

	if (init_routing_table() < 0)
		rte_exit(EXIT_FAILURE, "Cannot init routing table\n");

	check_all_ports_link_status(enabled_port_mask);

	signal(SIGUSR1, signal_handler);
	signal(SIGTERM, signal_handler);
	signal(SIGINT, signal_handler);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}