doc: use code snippets in sample app guides
[dpdk.git] / examples / ip_fragmentation / main.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/param.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include <rte_ip.h>
#include <rte_string_fns.h>

#include <rte_ip_frag.h>

#define RTE_LOGTYPE_IP_FRAG RTE_LOGTYPE_USER1

/* allow max jumbo frame 9.5 KB */
#define JUMBO_FRAME_MAX_SIZE    0x2600

#define ROUNDUP_DIV(a, b)       (((a) + (b) - 1) / (b))

/*
 * Default byte size for the IPv4/IPv6 Maximum Transmission Unit (MTU).
 * This value includes the size of the IP header.
 */
#define IPV4_MTU_DEFAULT        RTE_ETHER_MTU
#define IPV6_MTU_DEFAULT        RTE_ETHER_MTU

/*
 * The overhead from max frame size to MTU.
 * We have to consider the max possible overhead.
 */
#define MTU_OVERHEAD    \
        (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
                2 * sizeof(struct rte_vlan_hdr))

/*
 * Default payload in bytes for the IPv4/IPv6 packet.
 */
#define IPV4_DEFAULT_PAYLOAD    (IPV4_MTU_DEFAULT - sizeof(struct rte_ipv4_hdr))
#define IPV6_DEFAULT_PAYLOAD    (IPV6_MTU_DEFAULT - sizeof(struct rte_ipv6_hdr))

/*
 * Max number of fragments per packet expected - defined by config file.
 */
#define MAX_PACKET_FRAG RTE_LIBRTE_IP_FRAG_MAX_FRAG

#define NB_MBUF   8192

#define MAX_PKT_BURST   32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 1024
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;

/* ethernet addresses of ports */
static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

#ifndef IPv4_BYTES
#define IPv4_BYTES_FMT "%" PRIu8 ".%" PRIu8 ".%" PRIu8 ".%" PRIu8
#define IPv4_BYTES(addr) \
                (uint8_t) (((addr) >> 24) & 0xFF),\
                (uint8_t) (((addr) >> 16) & 0xFF),\
                (uint8_t) (((addr) >> 8) & 0xFF),\
                (uint8_t) ((addr) & 0xFF)
#endif

#ifndef IPv6_BYTES
#define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
                       "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
#define IPv6_BYTES(addr) \
        addr[0],  addr[1], addr[2],  addr[3], \
        addr[4],  addr[5], addr[6],  addr[7], \
        addr[8],  addr[9], addr[10], addr[11],\
        addr[12], addr[13], addr[14], addr[15]
#endif

#define IPV6_ADDR_LEN 16

/* mask of enabled ports */
static int enabled_port_mask = 0;

static int rx_queue_per_lcore = 1;

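/*
 * Size of the per-port TX staging table. The table is flushed once it
 * holds MAX_PKT_BURST packets, but the last packet added may itself
 * expand into up to MAX_PACKET_FRAG fragments, so twice the larger of
 * the two values leaves enough headroom for that worst case.
 */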
#define MBUF_TABLE_SIZE  (2 * MAX(MAX_PKT_BURST, MAX_PACKET_FRAG))

struct mbuf_table {
        uint16_t len;
        struct rte_mbuf *m_table[MBUF_TABLE_SIZE];
};

struct rx_queue {
        struct rte_mempool *direct_pool;
        struct rte_mempool *indirect_pool;
        struct rte_lpm *lpm;
        struct rte_lpm6 *lpm6;
        uint16_t portid;
};

#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
struct lcore_queue_conf {
        uint16_t n_rx_queue;
        uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
        struct rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
        struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];

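/*
 * RX enables JUMBO_FRAME and SCATTER so that frames up to
 * JUMBO_FRAME_MAX_SIZE can be received, possibly split across several
 * mbufs; TX enables MULTI_SEGS because fragments are chained mbufs,
 * and IPV4_CKSUM so the NIC recomputes the header checksum of each
 * newly built fragment header.
 */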
static struct rte_eth_conf port_conf = {
        .rxmode = {
                .max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
                .split_hdr_size = 0,
                .offloads = (DEV_RX_OFFLOAD_CHECKSUM |
                             DEV_RX_OFFLOAD_SCATTER |
                             DEV_RX_OFFLOAD_JUMBO_FRAME),
        },
        .txmode = {
                .mq_mode = ETH_MQ_TX_NONE,
                .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
                             DEV_TX_OFFLOAD_MULTI_SEGS),
        },
};

/*
 * IPv4 forwarding table
 */
struct l3fwd_ipv4_route {
        uint32_t ip;
        uint8_t  depth;
        uint8_t  if_out;
};

/* Default l3fwd_ipv4_route_array table. 8< */
struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
                {RTE_IPV4(100,10,0,0), 16, 0},
                {RTE_IPV4(100,20,0,0), 16, 1},
                {RTE_IPV4(100,30,0,0), 16, 2},
                {RTE_IPV4(100,40,0,0), 16, 3},
                {RTE_IPV4(100,50,0,0), 16, 4},
                {RTE_IPV4(100,60,0,0), 16, 5},
                {RTE_IPV4(100,70,0,0), 16, 6},
                {RTE_IPV4(100,80,0,0), 16, 7},
};
/* >8 End of default l3fwd_ipv4_route_array table. */

/*
 * IPv6 forwarding table
 */

struct l3fwd_ipv6_route {
        uint8_t ip[IPV6_ADDR_LEN];
        uint8_t depth;
        uint8_t if_out;
};

/* Default l3fwd_ipv6_route_array table. 8< */
static struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = {
        {{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
        {{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
        {{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
        {{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
        {{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
        {{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
        {{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
        {{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
};
/* >8 End of default l3fwd_ipv6_route_array table. */

#define LPM_MAX_RULES         1024
#define LPM6_MAX_RULES         1024
#define LPM6_NUMBER_TBL8S (1 << 16)

struct rte_lpm6_config lpm6_config = {
                .max_rules = LPM6_MAX_RULES,
                .number_tbl8s = LPM6_NUMBER_TBL8S,
                .flags = 0
};

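/*
 * Per-NUMA-socket resources, shared by all lcores on that socket:
 * mempools for fragment headers (direct) and zero-copy payload
 * references (indirect), plus the IPv4/IPv6 routing tables.
 */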
static struct rte_mempool *socket_direct_pool[RTE_MAX_NUMA_NODES];
static struct rte_mempool *socket_indirect_pool[RTE_MAX_NUMA_NODES];
static struct rte_lpm *socket_lpm[RTE_MAX_NUMA_NODES];
static struct rte_lpm6 *socket_lpm6[RTE_MAX_NUMA_NODES];

/* Send burst of packets on an output interface */
static inline int
send_burst(struct lcore_queue_conf *qconf, uint16_t n, uint16_t port)
{
        struct rte_mbuf **m_table;
        int ret;
        uint16_t queueid;

        queueid = qconf->tx_queue_id[port];
        m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

        ret = rte_eth_tx_burst(port, queueid, m_table, n);
        if (unlikely(ret < n)) {
                do {
                        rte_pktmbuf_free(m_table[ret]);
                } while (++ret < n);
        }

        return 0;
}

static inline void
l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
                uint8_t queueid, uint16_t port_in)
{
        struct rx_queue *rxq;
        uint32_t i, len, next_hop;
        uint16_t port_out, ether_type;
        int32_t len2;
        uint64_t ol_flags;
        const struct rte_ether_hdr *eth;

        ol_flags = 0;
        rxq = &qconf->rx_queue_list[queueid];

        /* by default, send everything back to the source port */
        port_out = port_in;

        /* save ether type of the incoming packet */
        eth = rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
        ether_type = eth->ether_type;

        /* Remove the Ethernet header from the input packet */
        rte_pktmbuf_adj(m, (uint16_t)sizeof(struct rte_ether_hdr));

        /* Build transmission burst */
        len = qconf->tx_mbufs[port_out].len;

        /* if this is an IPv4 packet */
        if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
                struct rte_ipv4_hdr *ip_hdr;
                uint32_t ip_dst;
                /* Read the lookup key (i.e. ip_dst) from the input packet */
                ip_hdr = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);
                ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);

                /* Find destination port */
                if (rte_lpm_lookup(rxq->lpm, ip_dst, &next_hop) == 0 &&
                                (enabled_port_mask & 1 << next_hop) != 0) {
                        port_out = next_hop;

                        /* Build transmission burst for new port */
                        len = qconf->tx_mbufs[port_out].len;
                }

                /* if we don't need to do any fragmentation */
                if (likely(IPV4_MTU_DEFAULT >= m->pkt_len)) {
                        qconf->tx_mbufs[port_out].m_table[len] = m;
                        len2 = 1;
                } else {
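                        /*
                         * Each output fragment is a two-mbuf chain: a
                         * direct mbuf from rxq->direct_pool carrying the
                         * new IPv4 header, attached to an indirect mbuf
                         * from rxq->indirect_pool that references the
                         * payload of the original packet, so the payload
                         * is never copied.
                         */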
                        len2 = rte_ipv4_fragment_packet(m,
                                &qconf->tx_mbufs[port_out].m_table[len],
                                (uint16_t)(MBUF_TABLE_SIZE - len),
                                IPV4_MTU_DEFAULT,
                                rxq->direct_pool, rxq->indirect_pool);

                        /* Free input packet */
                        rte_pktmbuf_free(m);

                        /* request HW to regenerate IPv4 cksum */
                        ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM);

                        /* If we fail to fragment the packet */
                        if (unlikely(len2 < 0))
                                return;
                }
        } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
                /* if this is an IPv6 packet */
                struct rte_ipv6_hdr *ip_hdr;

                /* Read the lookup key (i.e. ip_dst) from the input packet */
                ip_hdr = rte_pktmbuf_mtod(m, struct rte_ipv6_hdr *);

                /* Find destination port */
                if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
                                                &next_hop) == 0 &&
                                (enabled_port_mask & 1 << next_hop) != 0) {
                        port_out = next_hop;

                        /* Build transmission burst for new port */
                        len = qconf->tx_mbufs[port_out].len;
                }

                /* if we don't need to do any fragmentation */
                if (likely(IPV6_MTU_DEFAULT >= m->pkt_len)) {
                        qconf->tx_mbufs[port_out].m_table[len] = m;
                        len2 = 1;
                } else {
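                        /*
                         * Same scheme as the IPv4 branch: headers from
                         * the direct pool, payload shared through the
                         * indirect pool. No checksum flag is set here
                         * because IPv6 has no header checksum.
                         */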
                        len2 = rte_ipv6_fragment_packet(m,
                                &qconf->tx_mbufs[port_out].m_table[len],
                                (uint16_t)(MBUF_TABLE_SIZE - len),
                                IPV6_MTU_DEFAULT,
                                rxq->direct_pool, rxq->indirect_pool);

                        /* Free input packet */
                        rte_pktmbuf_free(m);

                        /* If we fail to fragment the packet */
                        if (unlikely(len2 < 0))
                                return;
                }
        }
        /* else, just forward the packet */
        else {
                qconf->tx_mbufs[port_out].m_table[len] = m;
                len2 = 1;
        }

        for (i = len; i < len + len2; i++) {
                void *d_addr_bytes;

                m = qconf->tx_mbufs[port_out].m_table[i];
                struct rte_ether_hdr *eth_hdr = (struct rte_ether_hdr *)
                        rte_pktmbuf_prepend(m,
                                (uint16_t)sizeof(struct rte_ether_hdr));
                if (eth_hdr == NULL) {
                        rte_panic("No headroom in mbuf.\n");
                }

                m->ol_flags |= ol_flags;
                m->l2_len = sizeof(struct rte_ether_hdr);

                /* 02:00:00:00:00:xx */
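                /*
                 * On a little-endian CPU this single 64-bit store writes
                 * the six destination-address bytes (plus the first two
                 * bytes of s_addr, which the copy below overwrites).
                 */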
                d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
                *((uint64_t *)d_addr_bytes) = 0x000000000002 +
                        ((uint64_t)port_out << 40);

                /* src addr */
                rte_ether_addr_copy(&ports_eth_addr[port_out],
                                &eth_hdr->s_addr);
                eth_hdr->ether_type = ether_type;
        }

        len += len2;

        if (likely(len < MAX_PKT_BURST)) {
                qconf->tx_mbufs[port_out].len = (uint16_t)len;
                return;
        }

        /* Transmit packets */
        send_burst(qconf, (uint16_t)len, port_out);
        qconf->tx_mbufs[port_out].len = 0;
}

/* main processing loop */
static int
main_loop(__rte_unused void *dummy)
{
        struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
        unsigned lcore_id;
        uint64_t prev_tsc, diff_tsc, cur_tsc;
        int i, j, nb_rx;
        uint16_t portid;
        struct lcore_queue_conf *qconf;
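        /*
         * Flush the TX tables roughly every BURST_TX_DRAIN_US
         * microseconds: convert that interval to TSC cycles once,
         * rounding the cycles-per-microsecond factor up.
         */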
        const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

        prev_tsc = 0;

        lcore_id = rte_lcore_id();
        qconf = &lcore_queue_conf[lcore_id];

        if (qconf->n_rx_queue == 0) {
                RTE_LOG(INFO, IP_FRAG, "lcore %u has nothing to do\n", lcore_id);
                return 0;
        }

        RTE_LOG(INFO, IP_FRAG, "entering main loop on lcore %u\n", lcore_id);

        for (i = 0; i < qconf->n_rx_queue; i++) {

                portid = qconf->rx_queue_list[i].portid;
                RTE_LOG(INFO, IP_FRAG, " -- lcoreid=%u portid=%d\n", lcore_id,
                                portid);
        }

        while (1) {

                cur_tsc = rte_rdtsc();

                /*
                 * TX burst queue drain
                 */
                diff_tsc = cur_tsc - prev_tsc;
                if (unlikely(diff_tsc > drain_tsc)) {

                        /*
                         * This could be optimized (use queueid instead of
                         * portid), but it is not called so often
                         */
                        for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
                                if (qconf->tx_mbufs[portid].len == 0)
                                        continue;
                                send_burst(&lcore_queue_conf[lcore_id],
                                           qconf->tx_mbufs[portid].len,
                                           portid);
                                qconf->tx_mbufs[portid].len = 0;
                        }

                        prev_tsc = cur_tsc;
                }

                /*
                 * Read packet from RX queues
                 */
                for (i = 0; i < qconf->n_rx_queue; i++) {

                        portid = qconf->rx_queue_list[i].portid;
                        nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
                                                 MAX_PKT_BURST);

                        /* Prefetch first packets */
                        for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
                                rte_prefetch0(rte_pktmbuf_mtod(
                                                pkts_burst[j], void *));
                        }

                        /* Prefetch and forward already prefetched packets */
                        for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
                                rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
                                                j + PREFETCH_OFFSET], void *));
                                l3fwd_simple_forward(pkts_burst[j], qconf, i, portid);
                        }

                        /* Forward remaining prefetched packets */
                        for (; j < nb_rx; j++) {
                                l3fwd_simple_forward(pkts_burst[j], qconf, i, portid);
                        }
                }
        }
}

/* display usage */
static void
print_usage(const char *prgname)
{
        printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
               "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
               "  -q NQ: number of RX queues (= ports) per lcore (default is 1)\n",
               prgname);
}

static int
parse_portmask(const char *portmask)
{
        char *end = NULL;
        unsigned long pm;

        /* parse hexadecimal string */
        pm = strtoul(portmask, &end, 16);
        if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
                return -1;

        if (pm == 0)
                return -1;

        return pm;
}

static int
parse_nqueue(const char *q_arg)
{
        char *end = NULL;
        unsigned long n;

        /* parse decimal string */
        n = strtoul(q_arg, &end, 10);
        if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
                return -1;
        if (n == 0)
                return -1;
        if (n >= MAX_RX_QUEUE_PER_LCORE)
                return -1;

        return n;
}

/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
        int opt, ret;
        char **argvopt;
        int option_index;
        char *prgname = argv[0];
        static struct option lgopts[] = {
                {NULL, 0, 0, 0}
        };

        argvopt = argv;

        while ((opt = getopt_long(argc, argvopt, "p:q:",
                                  lgopts, &option_index)) != EOF) {

                switch (opt) {
                /* portmask */
                case 'p':
                        enabled_port_mask = parse_portmask(optarg);
                        if (enabled_port_mask < 0) {
                                printf("invalid portmask\n");
                                print_usage(prgname);
                                return -1;
                        }
                        break;

                /* nqueue */
                case 'q':
                        rx_queue_per_lcore = parse_nqueue(optarg);
                        if (rx_queue_per_lcore < 0) {
                                printf("invalid queue number\n");
                                print_usage(prgname);
                                return -1;
                        }
                        break;

                /* long options */
                case 0:
                        print_usage(prgname);
                        return -1;

                default:
                        print_usage(prgname);
                        return -1;
                }
        }

        if (enabled_port_mask == 0) {
                printf("portmask not specified\n");
                print_usage(prgname);
                return -1;
        }

        if (optind >= 0)
                argv[optind-1] = prgname;

        ret = optind-1;
        optind = 1; /* reset getopt lib */
        return ret;
}

static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
        char buf[RTE_ETHER_ADDR_FMT_SIZE];
        rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
        printf("%s%s", name, buf);
}

/* Check link status of all enabled ports, waiting up to 9 seconds,
 * and finally print the status of each port. */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
        uint16_t portid;
        uint8_t count, all_ports_up, print_flag = 0;
        struct rte_eth_link link;
        int ret;
        char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];

        printf("\nChecking link status");
        fflush(stdout);
        for (count = 0; count <= MAX_CHECK_TIME; count++) {
                all_ports_up = 1;
                RTE_ETH_FOREACH_DEV(portid) {
                        if ((port_mask & (1 << portid)) == 0)
                                continue;
                        memset(&link, 0, sizeof(link));
                        ret = rte_eth_link_get_nowait(portid, &link);
                        if (ret < 0) {
                                all_ports_up = 0;
                                if (print_flag == 1)
                                        printf("Port %u link get failed: %s\n",
                                                portid, rte_strerror(-ret));
                                continue;
                        }
                        /* print link status if flag set */
                        if (print_flag == 1) {
                                rte_eth_link_to_str(link_status_text,
                                        sizeof(link_status_text), &link);
                                printf("Port %d %s\n", portid,
                                       link_status_text);
                                continue;
                        }
                        /* clear all_ports_up flag if any link down */
                        if (link.link_status == ETH_LINK_DOWN) {
                                all_ports_up = 0;
                                break;
                        }
                }
                /* after finally printing all link status, get out */
                if (print_flag == 1)
                        break;

                if (all_ports_up == 0) {
                        printf(".");
                        fflush(stdout);
                        rte_delay_ms(CHECK_INTERVAL);
                }

                /* set the print_flag if all ports up or timeout */
                if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
                        print_flag = 1;
                        printf("\ndone\n");
                }
        }
}

/* Check L3 packet type detection capability of the NIC port */
static int
check_ptype(int portid)
{
        int i, ret;
        int ptype_l3_ipv4 = 0, ptype_l3_ipv6 = 0;
        uint32_t ptype_mask = RTE_PTYPE_L3_MASK;

        ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0);
        if (ret <= 0)
                return 0;

        uint32_t ptypes[ret];

        ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret);
        for (i = 0; i < ret; ++i) {
                if (ptypes[i] & RTE_PTYPE_L3_IPV4)
                        ptype_l3_ipv4 = 1;
                if (ptypes[i] & RTE_PTYPE_L3_IPV6)
                        ptype_l3_ipv6 = 1;
        }

        if (ptype_l3_ipv4 == 0)
                printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);

        if (ptype_l3_ipv6 == 0)
                printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);

        if (ptype_l3_ipv4 && ptype_l3_ipv6)
                return 1;

        return 0;
}

/* Parse packet type of a packet by SW */
static inline void
parse_ptype(struct rte_mbuf *m)
{
        struct rte_ether_hdr *eth_hdr;
        uint32_t packet_type = RTE_PTYPE_UNKNOWN;
        uint16_t ether_type;

        eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
        ether_type = eth_hdr->ether_type;
        if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
                packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
        else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))
                packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;

        m->packet_type = packet_type;
}

/* callback function to detect packet type for a queue of a port */
static uint16_t
cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
                   struct rte_mbuf *pkts[], uint16_t nb_pkts,
                   uint16_t max_pkts __rte_unused,
                   void *user_param __rte_unused)
{
        uint16_t i;

        for (i = 0; i < nb_pkts; ++i)
                parse_ptype(pkts[i]);

        return nb_pkts;
}

static int
init_routing_table(void)
{
        struct rte_lpm *lpm;
        struct rte_lpm6 *lpm6;
        int socket, ret;
        unsigned i;

        for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
                if (socket_lpm[socket]) {
                        lpm = socket_lpm[socket];
                        /* populate the LPM table */
                        for (i = 0; i < RTE_DIM(l3fwd_ipv4_route_array); i++) {
                                ret = rte_lpm_add(lpm,
                                        l3fwd_ipv4_route_array[i].ip,
                                        l3fwd_ipv4_route_array[i].depth,
                                        l3fwd_ipv4_route_array[i].if_out);

                                if (ret < 0) {
                                        RTE_LOG(ERR, IP_FRAG, "Unable to add entry %i to the l3fwd "
                                                "LPM table\n", i);
                                        return -1;
                                }

                                RTE_LOG(INFO, IP_FRAG, "Socket %i: adding route " IPv4_BYTES_FMT
                                                "/%d (port %d)\n",
                                        socket,
                                        IPv4_BYTES(l3fwd_ipv4_route_array[i].ip),
                                        l3fwd_ipv4_route_array[i].depth,
                                        l3fwd_ipv4_route_array[i].if_out);
                        }
                }

                if (socket_lpm6[socket]) {
                        lpm6 = socket_lpm6[socket];
                        /* populate the LPM6 table */
                        for (i = 0; i < RTE_DIM(l3fwd_ipv6_route_array); i++) {
                                ret = rte_lpm6_add(lpm6,
                                        l3fwd_ipv6_route_array[i].ip,
                                        l3fwd_ipv6_route_array[i].depth,
                                        l3fwd_ipv6_route_array[i].if_out);

                                if (ret < 0) {
                                        RTE_LOG(ERR, IP_FRAG, "Unable to add entry %i to the l3fwd "
                                                "LPM6 table\n", i);
                                        return -1;
                                }

                                RTE_LOG(INFO, IP_FRAG, "Socket %i: adding route " IPv6_BYTES_FMT
                                                "/%d (port %d)\n",
                                        socket,
                                        IPv6_BYTES(l3fwd_ipv6_route_array[i].ip),
                                        l3fwd_ipv6_route_array[i].depth,
                                        l3fwd_ipv6_route_array[i].if_out);
                        }
                }
        }
        return 0;
}

static int
init_mem(void)
{
        char buf[PATH_MAX];
        struct rte_mempool *mp;
        struct rte_lpm *lpm;
        struct rte_lpm6 *lpm6;
        struct rte_lpm_config lpm_config;
        int socket;
        unsigned lcore_id;

        /* traverse through lcores and initialize structures on each socket */

        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {

                if (rte_lcore_is_enabled(lcore_id) == 0)
                        continue;

                socket = rte_lcore_to_socket_id(lcore_id);

                if (socket == SOCKET_ID_ANY)
                        socket = 0;

                if (socket_direct_pool[socket] == NULL) {
                        RTE_LOG(INFO, IP_FRAG, "Creating direct mempool on socket %i\n",
                                        socket);
                        snprintf(buf, sizeof(buf), "pool_direct_%i", socket);

                        mp = rte_pktmbuf_pool_create(buf, NB_MBUF, 32,
                                0, RTE_MBUF_DEFAULT_BUF_SIZE, socket);
                        if (mp == NULL) {
                                RTE_LOG(ERR, IP_FRAG, "Cannot create direct mempool\n");
                                return -1;
                        }
                        socket_direct_pool[socket] = mp;
                }

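                /*
                 * The indirect pool is created with a data room size of
                 * zero: its mbufs carry no packet data of their own and
                 * are only attached to the data of the packets being
                 * fragmented.
                 */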
                if (socket_indirect_pool[socket] == NULL) {
                        RTE_LOG(INFO, IP_FRAG, "Creating indirect mempool on socket %i\n",
                                        socket);
                        snprintf(buf, sizeof(buf), "pool_indirect_%i", socket);

                        mp = rte_pktmbuf_pool_create(buf, NB_MBUF, 32, 0, 0,
                                socket);
                        if (mp == NULL) {
                                RTE_LOG(ERR, IP_FRAG, "Cannot create indirect mempool\n");
                                return -1;
                        }
                        socket_indirect_pool[socket] = mp;
                }

                if (socket_lpm[socket] == NULL) {
                        RTE_LOG(INFO, IP_FRAG, "Creating LPM table on socket %i\n", socket);
                        snprintf(buf, sizeof(buf), "IP_FRAG_LPM_%i", socket);

                        lpm_config.max_rules = LPM_MAX_RULES;
                        lpm_config.number_tbl8s = 256;
                        lpm_config.flags = 0;

                        lpm = rte_lpm_create(buf, socket, &lpm_config);
                        if (lpm == NULL) {
                                RTE_LOG(ERR, IP_FRAG, "Cannot create LPM table\n");
                                return -1;
                        }
                        socket_lpm[socket] = lpm;
                }

                if (socket_lpm6[socket] == NULL) {
                        RTE_LOG(INFO, IP_FRAG, "Creating LPM6 table on socket %i\n", socket);
                        snprintf(buf, sizeof(buf), "IP_FRAG_LPM_%i", socket);

                        lpm6 = rte_lpm6_create(buf, socket, &lpm6_config);
                        if (lpm6 == NULL) {
                                RTE_LOG(ERR, IP_FRAG, "Cannot create LPM6 table\n");
                                return -1;
                        }
                        socket_lpm6[socket] = lpm6;
                }
        }

        return 0;
}

int
main(int argc, char **argv)
{
        struct lcore_queue_conf *qconf;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_txconf *txconf;
        struct rx_queue *rxq;
        int socket, ret;
        uint16_t nb_ports;
        uint16_t queueid = 0;
        unsigned lcore_id = 0, rx_lcore_id = 0;
        uint32_t n_tx_queue, nb_lcores;
        uint16_t portid;

        /* init EAL */
        ret = rte_eal_init(argc, argv);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "rte_eal_init failed\n");
        argc -= ret;
        argv += ret;

        /* parse application arguments (after the EAL ones) */
        ret = parse_args(argc, argv);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Invalid arguments\n");

        nb_ports = rte_eth_dev_count_avail();
        if (nb_ports == 0)
                rte_exit(EXIT_FAILURE, "No ports found!\n");

        nb_lcores = rte_lcore_count();

        /* initialize structures (mempools, lpm etc.) */
        if (init_mem() < 0)
                rte_panic("Cannot initialize memory structures!\n");

        /* check if portmask has non-existent ports */
        if (enabled_port_mask & ~(RTE_LEN2MASK(nb_ports, unsigned)))
                rte_exit(EXIT_FAILURE, "Non-existent ports in portmask!\n");

        /* initialize all ports */
        RTE_ETH_FOREACH_DEV(portid) {
                struct rte_eth_conf local_port_conf = port_conf;
                struct rte_eth_rxconf rxq_conf;

                /* skip ports that are not enabled */
                if ((enabled_port_mask & (1 << portid)) == 0) {
                        printf("Skipping disabled port %d\n", portid);
                        continue;
                }

                qconf = &lcore_queue_conf[rx_lcore_id];

                /* limit the frame size to the maximum supported by NIC */
                ret = rte_eth_dev_info_get(portid, &dev_info);
                if (ret != 0)
                        rte_exit(EXIT_FAILURE,
                                "Error during getting device (port %u) info: %s\n",
                                portid, strerror(-ret));

                local_port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
                    dev_info.max_rx_pktlen,
                    local_port_conf.rxmode.max_rx_pkt_len);

                /* get the lcore_id for this port */
                while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
                       qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {

                        rx_lcore_id++;
                        if (rx_lcore_id >= RTE_MAX_LCORE)
                                rte_exit(EXIT_FAILURE, "Not enough cores\n");

                        qconf = &lcore_queue_conf[rx_lcore_id];
                }

                socket = (int) rte_lcore_to_socket_id(rx_lcore_id);
                if (socket == SOCKET_ID_ANY)
                        socket = 0;

                rxq = &qconf->rx_queue_list[qconf->n_rx_queue];
                rxq->portid = portid;
                rxq->direct_pool = socket_direct_pool[socket];
                rxq->indirect_pool = socket_indirect_pool[socket];
                rxq->lpm = socket_lpm[socket];
                rxq->lpm6 = socket_lpm6[socket];
                qconf->n_rx_queue++;

                /* init port */
                printf("Initializing port %d on lcore %u...", portid,
                       rx_lcore_id);
                fflush(stdout);

                n_tx_queue = nb_lcores;
                if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
                        n_tx_queue = MAX_TX_QUEUE_PER_PORT;
                ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue,
                                            &local_port_conf);
                if (ret < 0) {
                        printf("\n");
                        rte_exit(EXIT_FAILURE, "Cannot configure device: "
                                "err=%d, port=%d\n",
                                ret, portid);
                }

                /* set the MTU to the maximum frame size the port will
                 * accept, minus the Ethernet/VLAN/CRC overhead */
                ret = rte_eth_dev_set_mtu(portid,
                        local_port_conf.rxmode.max_rx_pkt_len - MTU_OVERHEAD);
                if (ret < 0) {
                        printf("\n");
                        rte_exit(EXIT_FAILURE, "Set MTU failed: "
                                "err=%d, port=%d\n",
                                ret, portid);
                }

                ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
                                            &nb_txd);
                if (ret < 0) {
                        printf("\n");
                        rte_exit(EXIT_FAILURE, "Cannot adjust number of "
                                "descriptors: err=%d, port=%d\n", ret, portid);
                }

                /* init one RX queue */
                rxq_conf = dev_info.default_rxconf;
                rxq_conf.offloads = local_port_conf.rxmode.offloads;
                ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
                                             socket, &rxq_conf,
                                             socket_direct_pool[socket]);
                if (ret < 0) {
                        printf("\n");
                        rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
                                "err=%d, port=%d\n",
                                ret, portid);
                }

                ret = rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
                if (ret < 0) {
                        printf("\n");
                        rte_exit(EXIT_FAILURE,
                                "rte_eth_macaddr_get: err=%d, port=%d\n",
                                ret, portid);
                }

                print_ethaddr(" Address:", &ports_eth_addr[portid]);
                printf("\n");

                /* init one TX queue per (lcore, port) pair */
                ret = rte_eth_dev_info_get(portid, &dev_info);
                if (ret != 0)
                        rte_exit(EXIT_FAILURE,
                                "Error during getting device (port %u) info: %s\n",
                                portid, strerror(-ret));

                queueid = 0;
                for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
                        if (rte_lcore_is_enabled(lcore_id) == 0)
                                continue;

                        if (queueid >= dev_info.nb_tx_queues)
                                break;

                        socket = (int) rte_lcore_to_socket_id(lcore_id);
                        printf("txq=%u,%d ", lcore_id, queueid);
                        fflush(stdout);

                        txconf = &dev_info.default_txconf;
                        txconf->offloads = local_port_conf.txmode.offloads;
                        ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
                                                     socket, txconf);
                        if (ret < 0) {
                                printf("\n");
                                rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
                                        "err=%d, port=%d\n", ret, portid);
                        }

                        qconf = &lcore_queue_conf[lcore_id];
                        qconf->tx_queue_id[portid] = queueid;
                        queueid++;
                }

                printf("\n");
        }

        printf("\n");

        /* start ports */
        RTE_ETH_FOREACH_DEV(portid) {
                if ((enabled_port_mask & (1 << portid)) == 0) {
                        continue;
                }
                /* Start device */
                ret = rte_eth_dev_start(portid);
                if (ret < 0)
                        rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
                                ret, portid);

                ret = rte_eth_promiscuous_enable(portid);
                if (ret != 0)
                        rte_exit(EXIT_FAILURE,
                                "rte_eth_promiscuous_enable: err=%s, port=%d\n",
                                rte_strerror(-ret), portid);

                if (check_ptype(portid) == 0) {
                        rte_eth_add_rx_callback(portid, 0, cb_parse_ptype, NULL);
                        printf("Add Rx callback function to detect L3 packet type by SW: port = %d\n",
                                portid);
                }
        }

        if (init_routing_table() < 0)
                rte_exit(EXIT_FAILURE, "Cannot init routing table\n");

        check_all_ports_link_status(enabled_port_mask);

        /* launch per-lcore init on every lcore */
        rte_eal_mp_remote_launch(main_loop, NULL, CALL_MAIN);
        RTE_LCORE_FOREACH_WORKER(lcore_id) {
                if (rte_eal_wait_lcore(lcore_id) < 0)
                        return -1;
        }

        /* clean up the EAL */
        rte_eal_cleanup();

        return 0;
}