/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <getopt.h>
#include <sys/param.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include <rte_string_fns.h>
#include <rte_ip_frag.h>

#define RTE_LOGTYPE_IP_FRAG RTE_LOGTYPE_USER1
/* allow max jumbo frame 9.5 KB */
#define JUMBO_FRAME_MAX_SIZE 0x2600

#define ROUNDUP_DIV(a, b) (((a) + (b) - 1) / (b))
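/*
 * A quick sanity check of what ROUNDUP_DIV computes (figures here are
 * illustrative only, not taken from the code below): splitting a
 * 9000-byte payload into 1480-byte fragment payloads needs
 * ROUNDUP_DIV(9000, 1480) == 7 fragments.
 */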
/*
 * Default byte size for the IPv4/IPv6 Maximum Transfer Unit (MTU).
 * This value includes the size of the IPv4/IPv6 header.
 */
#define IPV4_MTU_DEFAULT RTE_ETHER_MTU
#define IPV6_MTU_DEFAULT RTE_ETHER_MTU
/*
 * The overhead from max frame size to MTU.
 * We have to consider the max possible overhead.
 */
#define MTU_OVERHEAD \
	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
	 2 * sizeof(struct rte_vlan_hdr))
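/*
 * For reference (using the DPDK constants above): RTE_ETHER_HDR_LEN (14) +
 * RTE_ETHER_CRC_LEN (4) + two VLAN headers (2 * 4) = 26 bytes of overhead,
 * so a 9728-byte frame leaves a 9702-byte MTU.
 */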
/*
 * Default payload in bytes for the IPv4/IPv6 packet.
 */
#define IPV4_DEFAULT_PAYLOAD (IPV4_MTU_DEFAULT - sizeof(struct rte_ipv4_hdr))
#define IPV6_DEFAULT_PAYLOAD (IPV6_MTU_DEFAULT - sizeof(struct rte_ipv6_hdr))
/*
 * Max number of fragments per packet expected - defined by config file.
 */
#define MAX_PACKET_FRAG RTE_LIBRTE_IP_FRAG_MAX_FRAG

#define NB_MBUF 8192

#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 1024
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/* ethernet addresses of ports */
static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
#define IPv4_BYTES_FMT "%" PRIu8 ".%" PRIu8 ".%" PRIu8 ".%" PRIu8
#define IPv4_BYTES(addr) \
	(uint8_t) (((addr) >> 24) & 0xFF),\
	(uint8_t) (((addr) >> 16) & 0xFF),\
	(uint8_t) (((addr) >> 8) & 0xFF),\
	(uint8_t) ((addr) & 0xFF)

#define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
	"%02x%02x:%02x%02x:%02x%02x:%02x%02x"
#define IPv6_BYTES(addr) \
	addr[0], addr[1], addr[2], addr[3], \
	addr[4], addr[5], addr[6], addr[7], \
	addr[8], addr[9], addr[10], addr[11], \
	addr[12], addr[13], addr[14], addr[15]
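/*
 * Example usage of the formatting helpers above (illustrative sketch only):
 *
 *	uint32_t ip4 = RTE_IPV4(192, 168, 0, 1);
 *	printf("dst=" IPv4_BYTES_FMT "\n", IPv4_BYTES(ip4));
 *
 *	uint8_t ip6[IPV6_ADDR_LEN] = { 0 };
 *	printf("dst=" IPv6_BYTES_FMT "\n", IPv6_BYTES(ip6));
 */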
#define IPV6_ADDR_LEN 16

/* mask of enabled ports */
static int enabled_port_mask = 0;

static int rx_queue_per_lcore = 1;

#define MBUF_TABLE_SIZE (2 * MAX(MAX_PKT_BURST, MAX_PACKET_FRAG))
struct mbuf_table {
	uint16_t len;
	struct rte_mbuf *m_table[MBUF_TABLE_SIZE];
};
struct rx_queue {
	struct rte_mempool *direct_pool;
	struct rte_mempool *indirect_pool;
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	uint16_t portid;
};
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
struct lcore_queue_conf {
	uint16_t n_rx_queue;
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
		.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
			     DEV_RX_OFFLOAD_SCATTER |
			     DEV_RX_OFFLOAD_JUMBO_FRAME),
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
			     DEV_TX_OFFLOAD_MULTI_SEGS),
	},
};
/*
 * IPv4 forwarding table
 */
struct l3fwd_ipv4_route {
	uint32_t ip;
	uint8_t depth;
	uint8_t if_out;
};

/* Default l3fwd_ipv4_route_array table. 8< */
struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
	{RTE_IPV4(100, 10, 0, 0), 16, 0},
	{RTE_IPV4(100, 20, 0, 0), 16, 1},
	{RTE_IPV4(100, 30, 0, 0), 16, 2},
	{RTE_IPV4(100, 40, 0, 0), 16, 3},
	{RTE_IPV4(100, 50, 0, 0), 16, 4},
	{RTE_IPV4(100, 60, 0, 0), 16, 5},
	{RTE_IPV4(100, 70, 0, 0), 16, 6},
	{RTE_IPV4(100, 80, 0, 0), 16, 7},
};
/* >8 End of default l3fwd_ipv4_route_array table. */
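/*
 * Worked example (illustrative): with the table above, a packet destined
 * to 100.30.4.1 matches the 100.30.0.0/16 prefix and is forwarded out
 * port 2; a destination that matches no prefix leaves port_out at the
 * input port (see l3fwd_simple_forward below).
 */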
/*
 * IPv6 forwarding table
 */
struct l3fwd_ipv6_route {
	uint8_t ip[IPV6_ADDR_LEN];
	uint8_t depth;
	uint8_t if_out;
};

/* Default l3fwd_ipv6_route_array table. 8< */
static struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = {
	{{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 0},
	{{2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 1},
	{{3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 2},
	{{4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 3},
	{{5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 4},
	{{6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 5},
	{{7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 6},
	{{8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 7},
};
/* >8 End of default l3fwd_ipv6_route_array table. */
#define LPM_MAX_RULES 1024
#define LPM6_MAX_RULES 1024
#define LPM6_NUMBER_TBL8S (1 << 16)

struct rte_lpm6_config lpm6_config = {
	.max_rules = LPM6_MAX_RULES,
	.number_tbl8s = LPM6_NUMBER_TBL8S,
	.flags = 0
};

static struct rte_mempool *socket_direct_pool[RTE_MAX_NUMA_NODES];
static struct rte_mempool *socket_indirect_pool[RTE_MAX_NUMA_NODES];
static struct rte_lpm *socket_lpm[RTE_MAX_NUMA_NODES];
static struct rte_lpm6 *socket_lpm6[RTE_MAX_NUMA_NODES];
/* Send burst of packets on an output interface */
static inline int
send_burst(struct lcore_queue_conf *qconf, uint16_t n, uint16_t port)
{
	struct rte_mbuf **m_table;
	uint16_t ret, queueid;

	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

	ret = rte_eth_tx_burst(port, queueid, m_table, n);
	if (unlikely(ret < n)) {
		/* free the mbufs that the NIC did not accept */
		do {
			rte_pktmbuf_free(m_table[ret]);
		} while (++ret < n);
	}

	return 0;
}
static inline void
l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
		uint8_t queueid, uint16_t port_in)
{
	struct rx_queue *rxq;
	uint32_t i, len, next_hop;
	uint16_t port_out, ether_type;
	int32_t len2;
	uint64_t ol_flags = 0;
	const struct rte_ether_hdr *eth;
	void *d_addr_bytes;

	rxq = &qconf->rx_queue_list[queueid];

	/* by default, send everything back to the source port */
	port_out = port_in;

	/* save ether type of the incoming packet */
	eth = rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
	ether_type = eth->ether_type;

	/* Remove the Ethernet header and trailer from the input packet */
	rte_pktmbuf_adj(m, (uint16_t)sizeof(struct rte_ether_hdr));

	/* Build transmission burst */
	len = qconf->tx_mbufs[port_out].len;

	/* if this is an IPv4 packet */
	if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
		struct rte_ipv4_hdr *ip_hdr;
		uint32_t ip_dst;

		/* Read the lookup key (i.e. ip_dst) from the input packet */
		ip_hdr = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);
		ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);

		/* Find destination port */
		if (rte_lpm_lookup(rxq->lpm, ip_dst, &next_hop) == 0 &&
				(enabled_port_mask & 1 << next_hop) != 0) {
			port_out = next_hop;

			/* Build transmission burst for new port */
			len = qconf->tx_mbufs[port_out].len;
		}

		/* if we don't need to do any fragmentation */
		if (likely(IPV4_MTU_DEFAULT >= m->pkt_len)) {
			qconf->tx_mbufs[port_out].m_table[len] = m;
			len2 = 1;
		} else {
			len2 = rte_ipv4_fragment_packet(m,
				&qconf->tx_mbufs[port_out].m_table[len],
				(uint16_t)(MBUF_TABLE_SIZE - len),
				IPV4_MTU_DEFAULT,
				rxq->direct_pool, rxq->indirect_pool);

			/* Free input packet */
			rte_pktmbuf_free(m);

			/* request HW to regenerate IPv4 cksum */
			ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM);

			/* If we fail to fragment the packet */
			if (unlikely(len2 < 0))
				return;
		}
	} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
		/* if this is an IPv6 packet */
		struct rte_ipv6_hdr *ip_hdr;

		/* Read the lookup key (i.e. ip_dst) from the input packet */
		ip_hdr = rte_pktmbuf_mtod(m, struct rte_ipv6_hdr *);

		/* Find destination port */
		if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
				&next_hop) == 0 &&
				(enabled_port_mask & 1 << next_hop) != 0) {
			port_out = next_hop;

			/* Build transmission burst for new port */
			len = qconf->tx_mbufs[port_out].len;
		}

		/* if we don't need to do any fragmentation */
		if (likely(IPV6_MTU_DEFAULT >= m->pkt_len)) {
			qconf->tx_mbufs[port_out].m_table[len] = m;
			len2 = 1;
		} else {
			len2 = rte_ipv6_fragment_packet(m,
				&qconf->tx_mbufs[port_out].m_table[len],
				(uint16_t)(MBUF_TABLE_SIZE - len),
				IPV6_MTU_DEFAULT,
				rxq->direct_pool, rxq->indirect_pool);

			/* Free input packet */
			rte_pktmbuf_free(m);

			/* If we fail to fragment the packet */
			if (unlikely(len2 < 0))
				return;
		}
	} else {
		/* else, just forward the packet */
		qconf->tx_mbufs[port_out].m_table[len] = m;
		len2 = 1;
	}

	for (i = len; i < len + len2; i++) {
		struct rte_ether_hdr *eth_hdr;

		m = qconf->tx_mbufs[port_out].m_table[i];
		eth_hdr = (struct rte_ether_hdr *)
			rte_pktmbuf_prepend(m,
				(uint16_t)sizeof(struct rte_ether_hdr));
		if (eth_hdr == NULL) {
			rte_panic("No headroom in mbuf.\n");
		}

		m->ol_flags |= ol_flags;
		m->l2_len = sizeof(struct rte_ether_hdr);

		/* dst addr: 02:00:00:00:00:xx, where xx is the output port */
		d_addr_bytes = &eth_hdr->dst_addr.addr_bytes[0];
		*((uint64_t *)d_addr_bytes) = 0x000000000002 +
			((uint64_t)port_out << 40);

		/* src addr */
		rte_ether_addr_copy(&ports_eth_addr[port_out],
				&eth_hdr->src_addr);
		eth_hdr->ether_type = ether_type;
	}

	len += len2;

	if (likely(len < MAX_PKT_BURST)) {
		qconf->tx_mbufs[port_out].len = (uint16_t)len;
		return;
	}

	/* Transmit packets */
	send_burst(qconf, (uint16_t)len, port_out);
	qconf->tx_mbufs[port_out].len = 0;
}
/* main processing loop */
static int
main_loop(__rte_unused void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int i, j, nb_rx;
	uint16_t portid;
	struct lcore_queue_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
		US_PER_S * BURST_TX_DRAIN_US;
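	/*
	 * Example (assuming a 2 GHz TSC): drain_tsc =
	 * ceil(2e9 / 1e6) * 100 = 200000 cycles, so the TX buffers
	 * are flushed roughly every 100 us.
	 */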
	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, IP_FRAG, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, IP_FRAG, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {
		portid = qconf->rx_queue_list[i].portid;
		RTE_LOG(INFO, IP_FRAG, " -- lcoreid=%u portid=%d\n", lcore_id,
				portid);
	}

	while (1) {

		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			/*
			 * This could be optimized (use queueid instead of
			 * portid), but it is not called so often
			 */
			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				send_burst(&lcore_queue_conf[lcore_id],
					qconf->tx_mbufs[portid].len,
					portid);
				qconf->tx_mbufs[portid].len = 0;
			}

			prev_tsc = cur_tsc;
		}

		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; i++) {
			portid = qconf->rx_queue_list[i].portid;
			nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
						 MAX_PKT_BURST);
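			/*
			 * The three loops below form a small software
			 * pipeline around PREFETCH_OFFSET (3): the first
			 * loop warms the cache for packets 0..2, the second
			 * prefetches packet j+3 while forwarding packet j,
			 * and the third drains the already-prefetched tail.
			 */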
			/* Prefetch first packets */
			for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
				rte_prefetch0(rte_pktmbuf_mtod(
						pkts_burst[j], void *));
			}

			/* Prefetch and forward already prefetched packets */
			for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
				rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
						j + PREFETCH_OFFSET], void *));
				l3fwd_simple_forward(pkts_burst[j], qconf, i, portid);
			}

			/* Forward remaining prefetched packets */
			for (; j < nb_rx; j++) {
				l3fwd_simple_forward(pkts_burst[j], qconf, i, portid);
			}
		}
	}
}
/* display usage */
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
	       "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
	       "  -q NQ: number of queues (=ports) per lcore (default is 1)\n",
	       prgname);
}

static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	return pm;
}
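/*
 * Examples (illustrative): parse_portmask("f") returns 0xf, enabling
 * ports 0-3; parse_portmask("5") returns 0x5, enabling ports 0 and 2.
 */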
static int
parse_nqueue(const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	/* parse decimal string */
	n = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n == 0)
		return -1;
	if (n >= MAX_RX_QUEUE_PER_LCORE)
		return -1;

	return n;
}
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:q:",
			lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask < 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;

		/* nqueue */
		case 'q':
			rx_queue_per_lcore = parse_nqueue(optarg);
			if (rx_queue_per_lcore < 0) {
				printf("invalid queue number\n");
				print_usage(prgname);
				return -1;
			}
			break;

		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (enabled_port_mask == 0) {
		printf("portmask not specified\n");
		print_usage(prgname);
		return -1;
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 1; /* reset getopt lib */
	return ret;
}
static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
/* Check the link status of all ports in up to 9s; print the status when done */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}
			/* print link status if flag set */
			if (print_flag == 1) {
				rte_eth_link_to_str(link_status_text,
					sizeof(link_status_text), &link);
				printf("Port %d %s\n", portid,
				       link_status_text);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
/* Check L3 packet type detection capability of the NIC port */
static int
check_ptype(int portid)
{
	int i, ret;
	int ptype_l3_ipv4 = 0, ptype_l3_ipv6 = 0;
	uint32_t ptype_mask = RTE_PTYPE_L3_MASK;

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0);
	if (ret <= 0)
		return 0;

	uint32_t ptypes[ret];

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret);
	for (i = 0; i < ret; ++i) {
		if (ptypes[i] & RTE_PTYPE_L3_IPV4)
			ptype_l3_ipv4 = 1;
		if (ptypes[i] & RTE_PTYPE_L3_IPV6)
			ptype_l3_ipv6 = 1;
	}

	if (ptype_l3_ipv4 == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);

	if (ptype_l3_ipv6 == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);

	if (ptype_l3_ipv4 && ptype_l3_ipv6)
		return 1;

	return 0;
}
/* Parse the packet type of a packet in software */
static void
parse_ptype(struct rte_mbuf *m)
{
	struct rte_ether_hdr *eth_hdr;
	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
	uint16_t ether_type;

	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	ether_type = eth_hdr->ether_type;
	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
		packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))
		packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;

	m->packet_type = packet_type;
}
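/*
 * Note: eth_hdr->ether_type is kept in network byte order, so it is
 * compared against rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) (0x0800 swapped
 * at compile time on little-endian hosts) rather than byte-swapping
 * every packet.
 */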
/* callback function to detect packet type for a queue of a port */
static uint16_t
cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
		struct rte_mbuf *pkts[], uint16_t nb_pkts,
		uint16_t max_pkts __rte_unused,
		void *user_param __rte_unused)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; ++i)
		parse_ptype(pkts[i]);

	return nb_pkts;
}
static int
init_routing_table(void)
{
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	int socket, ret;
	unsigned i;

	for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
		if (socket_lpm[socket]) {
			lpm = socket_lpm[socket];
			/* populate the LPM table */
			for (i = 0; i < RTE_DIM(l3fwd_ipv4_route_array); i++) {
				ret = rte_lpm_add(lpm,
					l3fwd_ipv4_route_array[i].ip,
					l3fwd_ipv4_route_array[i].depth,
					l3fwd_ipv4_route_array[i].if_out);
				if (ret < 0) {
					RTE_LOG(ERR, IP_FRAG, "Unable to add entry %i to the l3fwd "
						"LPM table\n", i);
					return -1;
				}

				RTE_LOG(INFO, IP_FRAG, "Socket %i: adding route " IPv4_BYTES_FMT
						"/%d (port %d)\n",
					socket,
					IPv4_BYTES(l3fwd_ipv4_route_array[i].ip),
					l3fwd_ipv4_route_array[i].depth,
					l3fwd_ipv4_route_array[i].if_out);
			}
		}

		if (socket_lpm6[socket]) {
			lpm6 = socket_lpm6[socket];
			/* populate the LPM6 table */
			for (i = 0; i < RTE_DIM(l3fwd_ipv6_route_array); i++) {
				ret = rte_lpm6_add(lpm6,
					l3fwd_ipv6_route_array[i].ip,
					l3fwd_ipv6_route_array[i].depth,
					l3fwd_ipv6_route_array[i].if_out);
				if (ret < 0) {
					RTE_LOG(ERR, IP_FRAG, "Unable to add entry %i to the l3fwd "
						"LPM6 table\n", i);
					return -1;
				}

				RTE_LOG(INFO, IP_FRAG, "Socket %i: adding route " IPv6_BYTES_FMT
						"/%d (port %d)\n",
					socket,
					IPv6_BYTES(l3fwd_ipv6_route_array[i].ip),
					l3fwd_ipv6_route_array[i].depth,
					l3fwd_ipv6_route_array[i].if_out);
			}
		}
	}
	return 0;
}
static int
init_mem(void)
{
	char buf[PATH_MAX];
	struct rte_mempool *mp;
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	struct rte_lpm_config lpm_config;
	int socket;
	unsigned lcore_id;

	/* traverse through lcores and initialize structures on each socket */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {

		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		socket = rte_lcore_to_socket_id(lcore_id);

		if (socket == SOCKET_ID_ANY)
			socket = 0;

		if (socket_direct_pool[socket] == NULL) {
			RTE_LOG(INFO, IP_FRAG, "Creating direct mempool on socket %i\n",
					socket);
			snprintf(buf, sizeof(buf), "pool_direct_%i", socket);

			mp = rte_pktmbuf_pool_create(buf, NB_MBUF, 32,
				0, RTE_MBUF_DEFAULT_BUF_SIZE, socket);
			if (mp == NULL) {
				RTE_LOG(ERR, IP_FRAG, "Cannot create direct mempool\n");
				return -1;
			}
			socket_direct_pool[socket] = mp;
		}

		if (socket_indirect_pool[socket] == NULL) {
			RTE_LOG(INFO, IP_FRAG, "Creating indirect mempool on socket %i\n",
					socket);
			snprintf(buf, sizeof(buf), "pool_indirect_%i", socket);

			mp = rte_pktmbuf_pool_create(buf, NB_MBUF, 32, 0, 0,
				socket);
			if (mp == NULL) {
				RTE_LOG(ERR, IP_FRAG, "Cannot create indirect mempool\n");
				return -1;
			}
			socket_indirect_pool[socket] = mp;
		}

		if (socket_lpm[socket] == NULL) {
			RTE_LOG(INFO, IP_FRAG, "Creating LPM table on socket %i\n", socket);
			snprintf(buf, sizeof(buf), "IP_FRAG_LPM_%i", socket);

			lpm_config.max_rules = LPM_MAX_RULES;
			lpm_config.number_tbl8s = 256;
			lpm_config.flags = 0;

			lpm = rte_lpm_create(buf, socket, &lpm_config);
			if (lpm == NULL) {
				RTE_LOG(ERR, IP_FRAG, "Cannot create LPM table\n");
				return -1;
			}
			socket_lpm[socket] = lpm;
		}

		if (socket_lpm6[socket] == NULL) {
			RTE_LOG(INFO, IP_FRAG, "Creating LPM6 table on socket %i\n", socket);
			snprintf(buf, sizeof(buf), "IP_FRAG_LPM6_%i", socket);

			lpm6 = rte_lpm6_create(buf, socket, &lpm6_config);
			if (lpm6 == NULL) {
				RTE_LOG(ERR, IP_FRAG, "Cannot create LPM6 table\n");
				return -1;
			}
			socket_lpm6[socket] = lpm6;
		}
	}

	return 0;
}
int
main(int argc, char **argv)
{
	struct lcore_queue_conf *qconf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf *txconf;
	struct rx_queue *rxq;
	int socket, ret;
	uint16_t nb_ports;
	uint16_t queueid = 0;
	unsigned lcore_id = 0, rx_lcore_id = 0;
	uint32_t n_tx_queue, nb_lcores;
	uint16_t portid;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "rte_eal_init failed\n");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid arguments\n");

	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No ports found!\n");

	nb_lcores = rte_lcore_count();

	/* initialize structures (mempools, lpm etc.) */
	if (init_mem() < 0)
		rte_panic("Cannot initialize memory structures!\n");

	/* check if portmask has non-existent ports */
	if (enabled_port_mask & ~(RTE_LEN2MASK(nb_ports, unsigned)))
		rte_exit(EXIT_FAILURE, "Non-existent ports in portmask!\n");

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		struct rte_eth_conf local_port_conf = port_conf;
		struct rte_eth_rxconf rxq_conf;

		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("Skipping disabled port %d\n", portid);
			continue;
		}

		qconf = &lcore_queue_conf[rx_lcore_id];

		/* limit the frame size to the maximum supported by NIC */
		ret = rte_eth_dev_info_get(portid, &dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"Error during getting device (port %u) info: %s\n",
				portid, strerror(-ret));

		local_port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
		    dev_info.max_rx_pktlen,
		    local_port_conf.rxmode.max_rx_pkt_len);

		/* get the lcore_id for this port */
		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
		       qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {

			rx_lcore_id++;
			if (rx_lcore_id >= RTE_MAX_LCORE)
				rte_exit(EXIT_FAILURE, "Not enough cores\n");

			qconf = &lcore_queue_conf[rx_lcore_id];
		}

		socket = (int) rte_lcore_to_socket_id(rx_lcore_id);
		if (socket == SOCKET_ID_ANY)
			socket = 0;

		rxq = &qconf->rx_queue_list[qconf->n_rx_queue];
		rxq->portid = portid;
		rxq->direct_pool = socket_direct_pool[socket];
		rxq->indirect_pool = socket_indirect_pool[socket];
		rxq->lpm = socket_lpm[socket];
		rxq->lpm6 = socket_lpm6[socket];
		qconf->n_rx_queue++;

		/* init port */
		printf("Initializing port %d on lcore %u...", portid,
		       rx_lcore_id);
		fflush(stdout);

		n_tx_queue = nb_lcores;
		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
		ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue,
					    &local_port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot configure device: "
				"err=%d, port=%d\n",
				ret, portid);

		/* set the MTU to the maximum frame size the port accepts,
		 * minus the L2 overhead */
		ret = rte_eth_dev_set_mtu(portid,
			local_port_conf.rxmode.max_rx_pkt_len - MTU_OVERHEAD);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Set MTU failed: "
				"err=%d, port=%d\n",
				ret, portid);
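		/*
		 * For example, with the default JUMBO_FRAME_MAX_SIZE
		 * (0x2600 = 9728 bytes) and MTU_OVERHEAD (26 bytes), the
		 * port MTU becomes 9702 bytes, assuming the NIC reports a
		 * max_rx_pktlen of at least 9728.
		 */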
		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
						       &nb_txd);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot adjust number of "
				"descriptors: err=%d, port=%d\n", ret, portid);

		/* init one RX queue */
		rxq_conf = dev_info.default_rxconf;
		rxq_conf.offloads = local_port_conf.rxmode.offloads;
		ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
					     socket, &rxq_conf,
					     socket_direct_pool[socket]);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
				"err=%d, port=%d\n",
				ret, portid);

		ret = rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_macaddr_get: err=%d, port=%d\n",
				ret, portid);

		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf("\n");

		/* init one TX queue per (lcore, port) pair */
		ret = rte_eth_dev_info_get(portid, &dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"Error during getting device (port %u) info: %s\n",
				portid, strerror(-ret));

		queueid = 0;
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;

			if (queueid >= dev_info.nb_tx_queues)
				break;

			socket = (int) rte_lcore_to_socket_id(lcore_id);
			printf("txq=%u,%d ", lcore_id, queueid);
			fflush(stdout);

			txconf = &dev_info.default_txconf;
			txconf->offloads = local_port_conf.txmode.offloads;
			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
						     socket, txconf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
					"err=%d, port=%d\n", ret, portid);

			qconf = &lcore_queue_conf[lcore_id];
			qconf->tx_queue_id[portid] = queueid;
			queueid++;
		}

		printf("\n");
	}

	printf("\n");

	/* start ports */
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0) {
			continue;
		}

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
				ret, portid);

		ret = rte_eth_promiscuous_enable(portid);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_promiscuous_enable: err=%s, port=%d\n",
				rte_strerror(-ret), portid);

		if (check_ptype(portid) == 0) {
			rte_eth_add_rx_callback(portid, 0, cb_parse_ptype, NULL);
			printf("Add Rx callback function to detect L3 packet type by SW: port = %d\n",
				portid);
		}
	}

	if (init_routing_table() < 0)
		rte_exit(EXIT_FAILURE, "Cannot init routing table\n");

	check_all_ports_link_status(enabled_port_mask);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MAIN);
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	/* clean up the EAL */
	rte_eal_cleanup();

	return 0;
}