/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <errno.h>
#include <getopt.h>
#include <signal.h>
#include <sys/queue.h>
#include <sys/param.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_string_fns.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include <rte_ip_frag.h>
#define MAX_PKT_BURST 32

#define RTE_LOGTYPE_IP_RSMBL RTE_LOGTYPE_USER1

#define MAX_JUMBO_PKT_LEN  9600

#define BUF_SIZE	RTE_MBUF_DEFAULT_DATAROOM
#define MBUF_DATA_SIZE	RTE_MBUF_DEFAULT_BUF_SIZE

/* lower bound for the per-queue mbuf pool size */
#define NB_MBUF 8192
#define MEMPOOL_CACHE_SIZE 256
/* allow max jumbo frame of 9.5 KB (0x2600 = 9728 bytes) */
#define JUMBO_FRAME_MAX_SIZE 0x2600
#define MAX_FLOW_NUM UINT16_MAX
#define MIN_FLOW_NUM 1
#define DEF_FLOW_NUM 0x1000

/* TTL numbers are in ms. */
#define MAX_FLOW_TTL (3600 * MS_PER_S)
#define MIN_FLOW_TTL 1
#define DEF_FLOW_TTL MS_PER_S
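/*
 * Note on TTL units (see parse_flow_ttl() below): a bare value is taken
 * as milliseconds and an "s" suffix is scaled by MS_PER_S, so e.g.
 * "--flowttl=3s" and "--flowttl=3000ms" both select a 3000 ms timeout.
 */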
#define MAX_FRAG_NUM RTE_LIBRTE_IP_FRAG_MAX_FRAG

/* Should be a power of two. */
#define IP_FRAG_TBL_BUCKET_ENTRIES 16
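/*
 * Minimal compile-time guard for the power-of-two requirement above
 * (a sketch, assuming a C11-capable compiler): the build fails if the
 * bucket count ever stops being a power of two.
 */
_Static_assert((IP_FRAG_TBL_BUCKET_ENTRIES &
	(IP_FRAG_TBL_BUCKET_ENTRIES - 1)) == 0,
	"IP_FRAG_TBL_BUCKET_ENTRIES must be a power of two");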
static uint32_t max_flow_num = DEF_FLOW_NUM;
static uint32_t max_flow_ttl = DEF_FLOW_TTL;

#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* Configure how many packets ahead to prefetch when reading packets */
#define PREFETCH_OFFSET 3
/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 1024

static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;

/* Ethernet addresses of ports */
static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
#define IPv4_BYTES_FMT "%" PRIu8 ".%" PRIu8 ".%" PRIu8 ".%" PRIu8
#define IPv4_BYTES(addr) \
	(uint8_t)(((addr) >> 24) & 0xFF), \
	(uint8_t)(((addr) >> 16) & 0xFF), \
	(uint8_t)(((addr) >> 8) & 0xFF), \
	(uint8_t)((addr) & 0xFF)
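/*
 * Usage sketch for the format helpers above (illustrative only, not part
 * of the datapath; the address value is hypothetical):
 */
static inline void
print_ipv4_addr(const char *name, uint32_t addr)
{
	/* addr is in host byte order, e.g. RTE_IPV4(10, 0, 0, 1) */
	printf("%s" IPv4_BYTES_FMT "\n", name, IPv4_BYTES(addr));
}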
#define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
			"%02x%02x:%02x%02x:%02x%02x:%02x%02x"
#define IPv6_BYTES(addr) \
	addr[0], addr[1], addr[2], addr[3], \
	addr[4], addr[5], addr[6], addr[7], \
	addr[8], addr[9], addr[10], addr[11], \
	addr[12], addr[13], addr[14], addr[15]
#define IPV6_ADDR_LEN 16

/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

static int rx_queue_per_lcore = 1;
struct mbuf_table {
	uint32_t len;
	uint32_t head;
	uint32_t tail;
	struct rte_mbuf *m_table[0];
};
struct rx_queue {
	struct rte_ip_frag_tbl *frag_tbl;
	struct rte_mempool *pool;
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	uint16_t portid;
};
struct tx_lcore_stat {
	uint64_t call;
	uint64_t drop;
	uint64_t queue;
	uint64_t send;
};
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
#define MAX_RX_QUEUE_PER_PORT 128
struct lcore_queue_conf {
	uint16_t n_rx_queue;
	struct rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct rte_ip_frag_death_row death_row;
	struct mbuf_table *tx_mbufs[RTE_MAX_ETHPORTS];
	struct tx_lcore_stat tx_stat;
} __rte_cache_aligned;
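/*
 * One queue configuration per lcore; the structure is cache-aligned so
 * that cores polling adjacent entries do not share cache lines.
 */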
static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_RSS,
		.mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
			RTE_ETHER_CRC_LEN,
		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = RTE_ETH_RSS_IP,
		},
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
			RTE_ETH_TX_OFFLOAD_MULTI_SEGS),
	},
};
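/*
 * Note on the TX offloads above: a reassembled datagram is a chain of
 * the original fragment mbufs, so the TX path requires
 * RTE_ETH_TX_OFFLOAD_MULTI_SEGS, and the IPv4 checksum of the rebuilt
 * header is left to hardware via RTE_ETH_TX_OFFLOAD_IPV4_CKSUM (see the
 * ol_flags set in reassemble() below).
 */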
/*
 * IPv4 forwarding table
 */
struct l3fwd_ipv4_route {
	uint32_t ip;
	uint8_t depth;
	uint8_t if_out;
};
/* Default l3fwd_ipv4_route_array table. 8< */
struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
	{RTE_IPV4(100, 10, 0, 0), 16, 0},
	{RTE_IPV4(100, 20, 0, 0), 16, 1},
	{RTE_IPV4(100, 30, 0, 0), 16, 2},
	{RTE_IPV4(100, 40, 0, 0), 16, 3},
	{RTE_IPV4(100, 50, 0, 0), 16, 4},
	{RTE_IPV4(100, 60, 0, 0), 16, 5},
	{RTE_IPV4(100, 70, 0, 0), 16, 6},
	{RTE_IPV4(100, 80, 0, 0), 16, 7},
};
/* >8 End of default l3fwd_ipv4_route_array table. */
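/*
 * Lookup sketch for the table above (illustrative only; assumes an
 * already populated LPM table): a destination of 100.30.1.1 matches the
 * 100.30.0.0/16 entry, so rte_lpm_lookup() returns 0 with next_hop = 2.
 */
static inline int
ipv4_route_example(struct rte_lpm *lpm)
{
	uint32_t next_hop;

	if (rte_lpm_lookup(lpm, RTE_IPV4(100, 30, 1, 1), &next_hop) == 0)
		return next_hop; /* 2 for the table above */
	return -1;
}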
/*
 * IPv6 forwarding table
 */
struct l3fwd_ipv6_route {
	uint8_t ip[IPV6_ADDR_LEN];
	uint8_t depth;
	uint8_t if_out;
};
/* Default l3fwd_ipv6_route_array table. 8< */
static struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = {
	{{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 0},
	{{2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 1},
	{{3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 2},
	{{4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 3},
	{{5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 4},
	{{6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 5},
	{{7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 6},
	{{8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 7},
};
/* >8 End of default l3fwd_ipv6_route_array table. */
#define LPM_MAX_RULES 1024
#define LPM6_MAX_RULES 1024
#define LPM6_NUMBER_TBL8S (1 << 16)
struct rte_lpm6_config lpm6_config = {
	.max_rules = LPM6_MAX_RULES,
	.number_tbl8s = LPM6_NUMBER_TBL8S,
	.flags = 0
};
static struct rte_lpm *socket_lpm[RTE_MAX_NUMA_NODES];
static struct rte_lpm6 *socket_lpm6[RTE_MAX_NUMA_NODES];
#ifdef RTE_LIBRTE_IP_FRAG_TBL_STAT
#define TX_LCORE_STAT_UPDATE(s, f, v) ((s)->f += (v))
#else
#define TX_LCORE_STAT_UPDATE(s, f, v) do {} while (0)
#endif /* RTE_LIBRTE_IP_FRAG_TBL_STAT */
/*
 * If the number of queued packets has reached the given threshold, send
 * a burst of packets out on the output interface.
 */
static inline uint32_t
send_burst(struct lcore_queue_conf *qconf, uint32_t thresh, uint16_t port)
{
	uint32_t fill, len, k, n;
	struct mbuf_table *txmb;

	txmb = qconf->tx_mbufs[port];
	len = txmb->len;

	/* number of packets currently queued in the ring */
	if ((int32_t)(fill = txmb->head - txmb->tail) < 0)
		fill += len;

	if (fill >= thresh) {
		n = RTE_MIN(len - txmb->tail, fill);

		k = rte_eth_tx_burst(port, qconf->tx_queue_id[port],
			txmb->m_table + txmb->tail, (uint16_t)n);

		TX_LCORE_STAT_UPDATE(&qconf->tx_stat, call, 1);
		TX_LCORE_STAT_UPDATE(&qconf->tx_stat, send, k);

		fill -= k;
		if ((txmb->tail += k) == len)
			txmb->tail = 0;
	}

	return fill;
}
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
send_single_packet(struct rte_mbuf *m, uint16_t port)
{
	uint32_t fill, lcore_id, len;
	struct lcore_queue_conf *qconf;
	struct mbuf_table *txmb;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	txmb = qconf->tx_mbufs[port];
	len = txmb->len;

	fill = send_burst(qconf, MAX_PKT_BURST, port);

	/* if the ring is still full, drop the oldest queued packet */
	if (fill == len - 1) {
		TX_LCORE_STAT_UPDATE(&qconf->tx_stat, drop, 1);
		rte_pktmbuf_free(txmb->m_table[txmb->tail]);
		if (++txmb->tail == len)
			txmb->tail = 0;
	}

	TX_LCORE_STAT_UPDATE(&qconf->tx_stat, queue, 1);
	txmb->m_table[txmb->head] = m;
	if (++txmb->head == len)
		txmb->head = 0;

	return 0;
}
static inline void
reassemble(struct rte_mbuf *m, uint16_t portid, uint32_t queue,
	struct lcore_queue_conf *qconf, uint64_t tms)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ip_frag_tbl *tbl;
	struct rte_ip_frag_death_row *dr;
	struct rx_queue *rxq;
	void *d_addr_bytes;
	uint32_t next_hop;
	uint16_t dst_port;

	rxq = &qconf->rx_queue_list[queue];

	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	dst_port = portid;

	/* if packet is IPv4 */
	if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
		struct rte_ipv4_hdr *ip_hdr;
		uint32_t ip_dst;

		ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);

		/* if it is a fragmented packet, then try to reassemble. */
		if (rte_ipv4_frag_pkt_is_fragmented(ip_hdr)) {
			struct rte_mbuf *mo;

			tbl = rxq->frag_tbl;
			dr = &qconf->death_row;

			/* prepare mbuf: setup l2_len/l3_len. */
			m->l2_len = sizeof(*eth_hdr);
			m->l3_len = sizeof(*ip_hdr);

			/* process this fragment. */
			mo = rte_ipv4_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr);
			if (mo == NULL)
				/* no packet to send out. */
				return;

			/* we have our packet reassembled. */
			if (mo != m) {
				m = mo;
				eth_hdr = rte_pktmbuf_mtod(m,
					struct rte_ether_hdr *);
				ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
			}

			/* update offloading flags */
			m->ol_flags |= (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM);
		}
		ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);

		/* Find destination port */
		if (rte_lpm_lookup(rxq->lpm, ip_dst, &next_hop) == 0 &&
				(enabled_port_mask & 1 << next_hop) != 0) {
			dst_port = next_hop;
		}

		eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
	} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
		/* if packet is IPv6 */
		struct ipv6_extension_fragment *frag_hdr;
		struct rte_ipv6_hdr *ip_hdr;

		ip_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);

		frag_hdr = rte_ipv6_frag_get_ipv6_fragment_header(ip_hdr);

		if (frag_hdr != NULL) {
			struct rte_mbuf *mo;

			tbl = rxq->frag_tbl;
			dr = &qconf->death_row;

			/* prepare mbuf: setup l2_len/l3_len. */
			m->l2_len = sizeof(*eth_hdr);
			m->l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);

			mo = rte_ipv6_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr, frag_hdr);
			if (mo == NULL)
				return;

			if (mo != m) {
				m = mo;
				eth_hdr = rte_pktmbuf_mtod(m,
					struct rte_ether_hdr *);
				ip_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
			}
		}

		/* Find destination port */
		if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
				&next_hop) == 0 &&
				(enabled_port_mask & 1 << next_hop) != 0) {
			dst_port = next_hop;
		}

		eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
	}
	/* if packet wasn't IPv4 or IPv6, it's forwarded to the port it came from */

	/* dst addr: 02:00:00:00:00:xx */
	d_addr_bytes = &eth_hdr->dst_addr.addr_bytes[0];
	*((uint64_t *)d_addr_bytes) = 0x000000000002 + ((uint64_t)dst_port << 40);

	/* src addr */
	rte_ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->src_addr);

	send_single_packet(m, dst_port);
}
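/*
 * Worked example for the destination MAC written in reassemble() above
 * (assuming a little-endian CPU): for dst_port == 3 the 64-bit store
 * leaves 02:00:00:00:00:03 in dst_addr, i.e. the locally administered
 * 02:00:00:00:00:xx pattern with xx = port id. The two bytes that spill
 * past the 6-byte field land in src_addr and are immediately overwritten
 * by the rte_ether_addr_copy() that follows.
 */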
/* main processing loop */
static int
main_loop(__rte_unused void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t diff_tsc, cur_tsc, prev_tsc;
	int i, j, nb_rx;
	uint16_t portid;
	struct lcore_queue_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
		US_PER_S * BURST_TX_DRAIN_US;
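	/*
	 * Worked example (hypothetical 2.0 GHz TSC): drain_tsc =
	 * ceil(2e9 / 1e6) * 100 = 2000 * 100 = 200000 cycles, i.e. the TX
	 * queues are force-drained roughly every 100 us.
	 */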
	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, IP_RSMBL, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, IP_RSMBL, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {
		portid = qconf->rx_queue_list[i].portid;
		RTE_LOG(INFO, IP_RSMBL, " -- lcoreid=%u portid=%u\n", lcore_id,
			portid);
	}

	while (1) {
		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			/*
			 * This could be optimized (use queueid instead of
			 * portid), but it is not called so often.
			 */
			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
				if ((enabled_port_mask & (1 << portid)) != 0)
					send_burst(qconf, 1, portid);
			}

			prev_tsc = cur_tsc;
		}

		/*
		 * Read packets from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; ++i) {
			portid = qconf->rx_queue_list[i].portid;

			nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
				MAX_PKT_BURST);

			/* Prefetch first packets */
			for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
				rte_prefetch0(rte_pktmbuf_mtod(
						pkts_burst[j], void *));
			}

			/* Prefetch and forward already prefetched packets */
			for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
				rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
					j + PREFETCH_OFFSET], void *));
				reassemble(pkts_burst[j], portid,
					i, qconf, cur_tsc);
			}

			/* Forward remaining prefetched packets */
			for (; j < nb_rx; j++) {
				reassemble(pkts_burst[j], portid,
					i, qconf, cur_tsc);
			}

			rte_ip_frag_free_death_row(&qconf->death_row,
				PREFETCH_OFFSET);
		}
	}
}
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]"
		" [--maxflows=<flows>] [--flowttl=<ttl>[(s|ms)]]\n"
		" -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		" -q NQ: number of RX queues per lcore\n"
		" --maxflows=<flows>: optional, maximum number of flows "
		"supported\n"
		" --flowttl=<ttl>[(s|ms)]: optional, maximum TTL for each "
		"flow\n",
		prgname);
}
static int
parse_flow_num(const char *str, uint32_t min, uint32_t max, uint32_t *val)
{
	char *end;
	uint64_t v;

	/* parse decimal string */
	errno = 0;
	v = strtoul(str, &end, 10);
	if (errno != 0 || *end != '\0')
		return -EINVAL;

	if (v < min || v > max)
		return -EINVAL;

	*val = (uint32_t)v;
	return 0;
}
static int
parse_flow_ttl(const char *str, uint32_t min, uint32_t max, uint32_t *val)
{
	char *end;
	uint64_t v;

	static const char frmt_sec[] = "s";
	static const char frmt_msec[] = "ms";

	/* parse decimal string */
	errno = 0;
	v = strtoul(str, &end, 10);
	if (errno != 0)
		return -EINVAL;

	if (*end != '\0') {
		if (strncmp(frmt_sec, end, sizeof(frmt_sec)) == 0)
			v *= MS_PER_S;
		else if (strncmp(frmt_msec, end, sizeof(frmt_msec)) != 0)
			return -EINVAL;
	}

	if (v < min || v > max)
		return -EINVAL;

	*val = (uint32_t)v;
	return 0;
}
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}
static int
parse_nqueue(const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	printf("%p\n", q_arg);

	/* parse decimal string */
	n = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n == 0)
		return -1;
	if (n >= MAX_RX_QUEUE_PER_LCORE)
		return -1;

	return n;
}
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{"maxflows", 1, 0, 0},
		{"flowttl", 1, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:q:",
			lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;

		/* nqueue */
		case 'q':
			rx_queue_per_lcore = parse_nqueue(optarg);
			if (rx_queue_per_lcore < 0) {
				printf("invalid queue number\n");
				print_usage(prgname);
				return -1;
			}
			break;

		/* long options */
		case 0:
			if (!strncmp(lgopts[option_index].name,
					"maxflows", 8)) {
				if ((ret = parse_flow_num(optarg, MIN_FLOW_NUM,
						MAX_FLOW_NUM,
						&max_flow_num)) != 0) {
					printf("invalid value: \"%s\" for "
						"parameter %s\n",
						optarg,
						lgopts[option_index].name);
					print_usage(prgname);
					return ret;
				}
			}

			if (!strncmp(lgopts[option_index].name, "flowttl", 7)) {
				if ((ret = parse_flow_ttl(optarg, MIN_FLOW_TTL,
						MAX_FLOW_TTL,
						&max_flow_ttl)) != 0) {
					printf("invalid value: \"%s\" for "
						"parameter %s\n",
						optarg,
						lgopts[option_index].name);
					print_usage(prgname);
					return ret;
				}
			}

			break;

		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 1; /* reset getopt lib */
	return ret;
}
static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* print link status if flag set */
			if (print_flag == 1) {
				rte_eth_link_to_str(link_status_text,
					sizeof(link_status_text), &link);
				printf("Port %d %s\n", portid,
					link_status_text);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == RTE_ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
static int
init_routing_table(void)
{
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	int socket, ret;
	unsigned i;

	for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
		if (socket_lpm[socket]) {
			lpm = socket_lpm[socket];
			/* populate the LPM table */
			for (i = 0; i < RTE_DIM(l3fwd_ipv4_route_array); i++) {
				ret = rte_lpm_add(lpm,
					l3fwd_ipv4_route_array[i].ip,
					l3fwd_ipv4_route_array[i].depth,
					l3fwd_ipv4_route_array[i].if_out);
				if (ret < 0) {
					RTE_LOG(ERR, IP_RSMBL, "Unable to add entry %i to the l3fwd "
						"LPM table\n", i);
					return -1;
				}

				RTE_LOG(INFO, IP_RSMBL, "Socket %i: adding route " IPv4_BYTES_FMT
						"/%d (port %d)\n",
					socket,
					IPv4_BYTES(l3fwd_ipv4_route_array[i].ip),
					l3fwd_ipv4_route_array[i].depth,
					l3fwd_ipv4_route_array[i].if_out);
			}
		}

		if (socket_lpm6[socket]) {
			lpm6 = socket_lpm6[socket];
			/* populate the LPM6 table */
			for (i = 0; i < RTE_DIM(l3fwd_ipv6_route_array); i++) {
				ret = rte_lpm6_add(lpm6,
					l3fwd_ipv6_route_array[i].ip,
					l3fwd_ipv6_route_array[i].depth,
					l3fwd_ipv6_route_array[i].if_out);
				if (ret < 0) {
					RTE_LOG(ERR, IP_RSMBL, "Unable to add entry %i to the l3fwd "
						"LPM6 table\n", i);
					return -1;
				}

				RTE_LOG(INFO, IP_RSMBL, "Socket %i: adding route " IPv6_BYTES_FMT
						"/%d (port %d)\n",
					socket,
					IPv6_BYTES(l3fwd_ipv6_route_array[i].ip),
					l3fwd_ipv6_route_array[i].depth,
					l3fwd_ipv6_route_array[i].if_out);
			}
		}
	}
	return 0;
}
/* Allocate the TX mbuf ring for a given (lcore, port) pair. */
static void
setup_port_tbl(struct lcore_queue_conf *qconf, uint32_t lcore, int socket,
	uint32_t port)
{
	struct mbuf_table *mtb;
	uint32_t n;
	size_t sz;

	n = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST);
	sz = sizeof(*mtb) + sizeof(mtb->m_table[0]) * n;

	if ((mtb = rte_zmalloc_socket(__func__, sz, RTE_CACHE_LINE_SIZE,
			socket)) == NULL) {
		RTE_LOG(ERR, IP_RSMBL, "%s() for lcore: %u, port: %u "
			"failed to allocate %zu bytes\n",
			__func__, lcore, port, sz);
		rte_exit(EXIT_FAILURE, "failed to allocate TX mbuf table\n");
	}

	mtb->len = n;
	qconf->tx_mbufs[port] = mtb;
}
static int
setup_queue_tbl(struct rx_queue *rxq, uint32_t lcore, uint32_t queue)
{
	int socket;
	uint32_t nb_mbuf;
	uint64_t frag_cycles;
	char buf[RTE_MEMPOOL_NAMESIZE];

	socket = rte_lcore_to_socket_id(lcore);
	if (socket == SOCKET_ID_ANY)
		socket = 0;

	/* Each table entry holds information about packet fragmentation. 8< */
	frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S *
		max_flow_ttl;

	if ((rxq->frag_tbl = rte_ip_frag_table_create(max_flow_num,
			IP_FRAG_TBL_BUCKET_ENTRIES, max_flow_num, frag_cycles,
			socket)) == NULL) {
		RTE_LOG(ERR, IP_RSMBL, "ip_frag_tbl_create(%u) on "
			"lcore: %u for queue: %u failed\n",
			max_flow_num, lcore, queue);
		return -1;
	}
	/* >8 End of holding packet fragmentation. */
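	/*
	 * Worked example for frag_cycles (hypothetical 2.0 GHz TSC, default
	 * TTL of 1000 ms): ceil(2e9 / 1000) * 1000 = 2e9 cycles, so a
	 * fragment that waits in the table longer than one second expires.
	 */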
	/*
	 * At any given moment up to <max_flow_num * (MAX_FRAG_NUM)>
	 * mbufs could be stored in the fragment table.
	 * Plus, each TX queue can hold up to <max_flow_num> packets.
	 */

	/* mbufs stored in the fragment table. 8< */
	nb_mbuf = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) * MAX_FRAG_NUM;
	nb_mbuf *= (port_conf.rxmode.mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
			+ BUF_SIZE - 1) / BUF_SIZE;
	nb_mbuf *= 2; /* ipv4 and ipv6 */
	nb_mbuf += nb_rxd + nb_txd;

	nb_mbuf = RTE_MAX(nb_mbuf, (uint32_t)NB_MBUF);
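	/*
	 * Worked sizing example (hypothetical values, assuming
	 * MAX_FRAG_NUM = 4): max_flow_num = 0x1000 (4096) flows give
	 * 4096 * 4 = 16384 mbufs; a full 9728-byte jumbo frame spans
	 * ceil(9728 / 2048) = 5 data buffers, and doubling for IPv4 + IPv6
	 * gives 163840; adding nb_rxd + nb_txd yields ~165888 mbufs, well
	 * above the NB_MBUF floor.
	 */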
	snprintf(buf, sizeof(buf), "mbuf_pool_%u_%u", lcore, queue);

	rxq->pool = rte_pktmbuf_pool_create(buf, nb_mbuf, MEMPOOL_CACHE_SIZE, 0,
		MBUF_DATA_SIZE, socket);
	if (rxq->pool == NULL) {
		RTE_LOG(ERR, IP_RSMBL,
			"rte_pktmbuf_pool_create(%s) failed", buf);
		return -1;
	}
	/* >8 End of mbufs stored in the fragmentation table. */

	return 0;
}
static int
init_mem(void)
{
	char buf[PATH_MAX];
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	struct rte_lpm_config lpm_config;
	int socket;
	unsigned lcore_id;

	/* traverse through lcores and initialize structures on each socket */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {

		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		socket = rte_lcore_to_socket_id(lcore_id);

		if (socket == SOCKET_ID_ANY)
			socket = 0;

		if (socket_lpm[socket] == NULL) {
			RTE_LOG(INFO, IP_RSMBL, "Creating LPM table on socket %i\n", socket);
			snprintf(buf, sizeof(buf), "IP_RSMBL_LPM_%i", socket);

			lpm_config.max_rules = LPM_MAX_RULES;
			lpm_config.number_tbl8s = 256;
			lpm_config.flags = 0;

			lpm = rte_lpm_create(buf, socket, &lpm_config);
			if (lpm == NULL) {
				RTE_LOG(ERR, IP_RSMBL, "Cannot create LPM table\n");
				return -1;
			}
			socket_lpm[socket] = lpm;
		}

		if (socket_lpm6[socket] == NULL) {
			RTE_LOG(INFO, IP_RSMBL, "Creating LPM6 table on socket %i\n", socket);
			snprintf(buf, sizeof(buf), "IP_RSMBL_LPM6_%i", socket);

			lpm6 = rte_lpm6_create(buf, socket, &lpm6_config);
			if (lpm6 == NULL) {
				RTE_LOG(ERR, IP_RSMBL, "Cannot create LPM6 table\n");
				return -1;
			}
			socket_lpm6[socket] = lpm6;
		}
	}

	return 0;
}
static void
queue_dump_stat(void)
{
	uint32_t i, lcore;
	const struct lcore_queue_conf *qconf;

	for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
		if (rte_lcore_is_enabled(lcore) == 0)
			continue;

		qconf = &lcore_queue_conf[lcore];
		for (i = 0; i < qconf->n_rx_queue; i++) {

			fprintf(stdout, " -- lcoreid=%u portid=%u "
				"frag tbl stat:\n",
				lcore, qconf->rx_queue_list[i].portid);
			rte_ip_frag_table_statistics_dump(stdout,
					qconf->rx_queue_list[i].frag_tbl);
			fprintf(stdout, "TX bursts:\t%" PRIu64 "\n"
				"TX packets queued:\t%" PRIu64 "\n"
				"TX packets dropped:\t%" PRIu64 "\n"
				"TX packets sent:\t%" PRIu64 "\n",
				qconf->tx_stat.call,
				qconf->tx_stat.queue,
				qconf->tx_stat.drop,
				qconf->tx_stat.send);
		}
	}
}
static void
signal_handler(int signum)
{
	queue_dump_stat();
	if (signum != SIGUSR1)
		rte_exit(0, "received signal: %d, exiting\n", signum);
}
int
main(int argc, char **argv)
{
	struct lcore_queue_conf *qconf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf *txconf;
	struct rx_queue *rxq;
	int ret, socket;
	uint16_t queueid;
	unsigned lcore_id = 0, rx_lcore_id = 0;
	uint32_t n_tx_queue, nb_lcores;
	uint16_t nb_ports, portid;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid IP reassembly parameters\n");

	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No ports found!\n");

	nb_lcores = rte_lcore_count();

	/* initialize structures (mempools, lpm etc.) */
	if (init_mem() < 0)
		rte_panic("Cannot initialize memory structures!\n");

	/* check if portmask has non-existent ports */
	if (enabled_port_mask & ~(RTE_LEN2MASK(nb_ports, unsigned)))
		rte_exit(EXIT_FAILURE, "Non-existent ports in portmask!\n");

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		struct rte_eth_rxconf rxq_conf;
		struct rte_eth_conf local_port_conf = port_conf;

		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}

		qconf = &lcore_queue_conf[rx_lcore_id];

		/* limit the frame size to the maximum supported by NIC */
		ret = rte_eth_dev_info_get(portid, &dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"Error during getting device (port %u) info: %s\n",
				portid, strerror(-ret));

		local_port_conf.rxmode.mtu = RTE_MIN(
			dev_info.max_mtu,
			local_port_conf.rxmode.mtu);

		/* get the lcore_id for this port */
		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
			qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {

			rx_lcore_id++;
			if (rx_lcore_id >= RTE_MAX_LCORE)
				rte_exit(EXIT_FAILURE, "Not enough cores\n");

			qconf = &lcore_queue_conf[rx_lcore_id];
		}

		socket = rte_lcore_to_socket_id(portid);
		if (socket == SOCKET_ID_ANY)
			socket = 0;

		queueid = qconf->n_rx_queue;
		rxq = &qconf->rx_queue_list[queueid];
		rxq->portid = portid;
		rxq->lpm = socket_lpm[socket];
		rxq->lpm6 = socket_lpm6[socket];

		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
			&nb_txd);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot adjust number of descriptors: err=%d, port=%d\n",
				ret, portid);

		if (setup_queue_tbl(rxq, rx_lcore_id, queueid) < 0)
			rte_exit(EXIT_FAILURE, "Failed to set up queue table\n");
		qconf->n_rx_queue++;

		/* init port */
		printf("Initializing port %d ... ", portid);
		fflush(stdout);

		n_tx_queue = nb_lcores;
		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			printf("Port %u modified RSS hash function based on hardware support, "
				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
				portid,
				port_conf.rx_adv_conf.rss_conf.rss_hf,
				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue,
			&local_port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot configure device: "
				"err=%d, port=%d\n",
				ret, portid);

		/* init one RX queue */
		rxq_conf = dev_info.default_rxconf;
		rxq_conf.offloads = local_port_conf.rxmode.offloads;
		ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
			socket, &rxq_conf,
			rxq->pool);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
				"err=%d, port=%d\n",
				ret, portid);

		ret = rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_macaddr_get: err=%d, port=%d\n",
				ret, portid);

		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf("\n");

		/* init one TX queue per couple (lcore,port) */
		queueid = 0;
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;

			socket = (int) rte_lcore_to_socket_id(lcore_id);

			printf("txq=%u,%d,%d ", lcore_id, queueid, socket);
			fflush(stdout);

			txconf = &dev_info.default_txconf;
			txconf->offloads = local_port_conf.txmode.offloads;

			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
				socket, txconf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);

			qconf = &lcore_queue_conf[lcore_id];
			qconf->tx_queue_id[portid] = queueid;
			setup_port_tbl(qconf, lcore_id, socket, portid);
			queueid++;
		}
		printf("\n");
	}

	printf("\n");

	/* start ports */
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0) {
			continue;
		}

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
				ret, portid);

		ret = rte_eth_promiscuous_enable(portid);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_promiscuous_enable: err=%s, port=%d\n",
				rte_strerror(-ret), portid);
	}

	if (init_routing_table() < 0)
		rte_exit(EXIT_FAILURE, "Cannot init routing table\n");

	check_all_ports_link_status(enabled_port_mask);

	signal(SIGUSR1, signal_handler);
	signal(SIGTERM, signal_handler);
	signal(SIGINT, signal_handler);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MAIN);
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	/* clean up the EAL */
	rte_eal_cleanup();

	return 0;
}