examples/l3fwd: move routes to common header
[dpdk.git] / examples / l3fwd / main.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <stdint.h>
8 #include <inttypes.h>
9 #include <sys/types.h>
10 #include <string.h>
11 #include <sys/queue.h>
12 #include <stdarg.h>
13 #include <errno.h>
14 #include <getopt.h>
15 #include <signal.h>
16 #include <stdbool.h>
17
18 #include <rte_common.h>
19 #include <rte_vect.h>
20 #include <rte_byteorder.h>
21 #include <rte_log.h>
22 #include <rte_malloc.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_eal.h>
26 #include <rte_launch.h>
27 #include <rte_atomic.h>
28 #include <rte_cycles.h>
29 #include <rte_prefetch.h>
30 #include <rte_lcore.h>
31 #include <rte_per_lcore.h>
32 #include <rte_branch_prediction.h>
33 #include <rte_interrupts.h>
34 #include <rte_random.h>
35 #include <rte_debug.h>
36 #include <rte_ether.h>
37 #include <rte_mempool.h>
38 #include <rte_mbuf.h>
39 #include <rte_ip.h>
40 #include <rte_tcp.h>
41 #include <rte_udp.h>
42 #include <rte_string_fns.h>
43 #include <rte_cpuflags.h>
44
45 #include <cmdline_parse.h>
46 #include <cmdline_parse_etheraddr.h>
47
48 #include "l3fwd.h"
49 #include "l3fwd_event.h"
50 #include "l3fwd_route.h"
51
52 #define MAX_TX_QUEUE_PER_PORT RTE_MAX_LCORE
53 #define MAX_RX_QUEUE_PER_PORT 128
54
55 #define MAX_LCORE_PARAMS 1024
56
57 /* Static global variables used within this file. */
58 static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
59 static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
60
61 /* Ports have promiscuous mode off by default. */
62 static int promiscuous_on;
63
64 /* Select Longest-Prefix or Exact match. */
65 static int l3fwd_lpm_on;
66 static int l3fwd_em_on;
67
68 /* Global variables. */
69
70 static int numa_on = 1; /**< NUMA is enabled by default. */
71 static int parse_ptype; /**< Parse packet type using rx callback;
72                          *   disabled by default. */
73 static int per_port_pool; /**< Use separate buffer pools per port;
74                            *   disabled by default. */
75
76 volatile bool force_quit;
77
78 /* ethernet addresses of ports */
79 uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
80 struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
81
82 xmm_t val_eth[RTE_MAX_ETHPORTS];
83
84 /* mask of enabled ports */
85 uint32_t enabled_port_mask;
86
87 /* Used only in exact match mode. */
88 int ipv6; /**< ipv6 is false by default. */
89 uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
90
91 struct lcore_conf lcore_conf[RTE_MAX_LCORE];
92
93 struct lcore_params {
94         uint16_t port_id;
95         uint8_t queue_id;
96         uint8_t lcore_id;
97 } __rte_cache_aligned;
98
99 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
100 static struct lcore_params lcore_params_array_default[] = {
101         {0, 0, 2},
102         {0, 1, 2},
103         {0, 2, 2},
104         {1, 0, 2},
105         {1, 1, 2},
106         {1, 2, 2},
107         {2, 0, 2},
108         {3, 0, 3},
109         {3, 1, 3},
110 };
111
112 static struct lcore_params * lcore_params = lcore_params_array_default;
113 static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
114                                 sizeof(lcore_params_array_default[0]);
115
116 static struct rte_eth_conf port_conf = {
117         .rxmode = {
118                 .mq_mode = ETH_MQ_RX_RSS,
119                 .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
120                 .split_hdr_size = 0,
121                 .offloads = DEV_RX_OFFLOAD_CHECKSUM,
122         },
123         .rx_adv_conf = {
124                 .rss_conf = {
125                         .rss_key = NULL,
126                         .rss_hf = ETH_RSS_IP,
127                 },
128         },
129         .txmode = {
130                 .mq_mode = ETH_MQ_TX_NONE,
131         },
132 };
133
134 static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
135 static uint8_t lkp_per_socket[NB_SOCKETS];
136
137 struct l3fwd_lkp_mode {
138         void  (*setup)(int);
139         int   (*check_ptype)(int);
140         rte_rx_callback_fn cb_parse_ptype;
141         int   (*main_loop)(void *);
142         void* (*get_ipv4_lookup_struct)(int);
143         void* (*get_ipv6_lookup_struct)(int);
144 };
145
146 static struct l3fwd_lkp_mode l3fwd_lkp;
147
148 static struct l3fwd_lkp_mode l3fwd_em_lkp = {
149         .setup                  = setup_hash,
150         .check_ptype            = em_check_ptype,
151         .cb_parse_ptype         = em_cb_parse_ptype,
152         .main_loop              = em_main_loop,
153         .get_ipv4_lookup_struct = em_get_ipv4_l3fwd_lookup_struct,
154         .get_ipv6_lookup_struct = em_get_ipv6_l3fwd_lookup_struct,
155 };
156
157 static struct l3fwd_lkp_mode l3fwd_lpm_lkp = {
158         .setup                  = setup_lpm,
159         .check_ptype            = lpm_check_ptype,
160         .cb_parse_ptype         = lpm_cb_parse_ptype,
161         .main_loop              = lpm_main_loop,
162         .get_ipv4_lookup_struct = lpm_get_ipv4_l3fwd_lookup_struct,
163         .get_ipv6_lookup_struct = lpm_get_ipv6_l3fwd_lookup_struct,
164 };
165
166 /*
167  * 198.18.0.0/16 is set aside for RFC2544 benchmarking (RFC5735).
168  * 198.18.{0-7}.0/24 = Port {0-7}
169  */
170 const struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
171         {RTE_IPV4(198, 18, 0, 0), 24, 0},
172         {RTE_IPV4(198, 18, 1, 0), 24, 1},
173         {RTE_IPV4(198, 18, 2, 0), 24, 2},
174         {RTE_IPV4(198, 18, 3, 0), 24, 3},
175         {RTE_IPV4(198, 18, 4, 0), 24, 4},
176         {RTE_IPV4(198, 18, 5, 0), 24, 5},
177         {RTE_IPV4(198, 18, 6, 0), 24, 6},
178         {RTE_IPV4(198, 18, 7, 0), 24, 7},
179 };
180
181 /*
182  * 2001:200::/48 is the IANA-reserved range for IPv6 benchmarking (RFC5180).
183  * 2001:200:0:{0-7}::/64 = Port {0-7}
184  */
185 const struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
186         {{32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 0},
187         {{32, 1, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 1},
188         {{32, 1, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 2},
189         {{32, 1, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 3},
190         {{32, 1, 2, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 4},
191         {{32, 1, 2, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 5},
192         {{32, 1, 2, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 6},
193         {{32, 1, 2, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 7},
194 };
195
196 /*
197  * Set up lookup methods for forwarding.
198  * Currently exact-match and longest-prefix-match
199  * are supported.
200  */
201 static void
202 setup_l3fwd_lookup_tables(void)
203 {
204         /* Setup HASH lookup functions. */
205         if (l3fwd_em_on)
206                 l3fwd_lkp = l3fwd_em_lkp;
207         /* Setup LPM lookup functions. */
208         else
209                 l3fwd_lkp = l3fwd_lpm_lkp;
210 }
211
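/* Sanity-check the (port, queue, lcore) tuples: reject out-of-range queue ids and
 * disabled lcores, and warn when an lcore sits on a non-zero NUMA socket while
 * NUMA awareness is off.
 */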
212 static int
213 check_lcore_params(void)
214 {
215         uint8_t queue, lcore;
216         uint16_t i;
217         int socketid;
218
219         for (i = 0; i < nb_lcore_params; ++i) {
220                 queue = lcore_params[i].queue_id;
221                 if (queue >= MAX_RX_QUEUE_PER_PORT) {
222                         printf("invalid queue number: %hhu\n", queue);
223                         return -1;
224                 }
225                 lcore = lcore_params[i].lcore_id;
226                 if (!rte_lcore_is_enabled(lcore)) {
227                         printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
228                         return -1;
229                 }
230                 socketid = rte_lcore_to_socket_id(lcore);
231                 if ((socketid != 0) && (numa_on == 0)) {
232                         printf("warning: lcore %hhu is on socket %d with numa off\n",
233                                 lcore, socketid);
234                 }
235         }
236         return 0;
237 }
238
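/* Verify that every port referenced by the lcore config is enabled in the port
 * mask and present on the board.
 */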
239 static int
240 check_port_config(void)
241 {
242         uint16_t portid;
243         uint16_t i;
244
245         for (i = 0; i < nb_lcore_params; ++i) {
246                 portid = lcore_params[i].port_id;
247                 if ((enabled_port_mask & (1 << portid)) == 0) {
248                         printf("port %u is not enabled in port mask\n", portid);
249                         return -1;
250                 }
251                 if (!rte_eth_dev_is_valid_port(portid)) {
252                         printf("port %u is not present on the board\n", portid);
253                         return -1;
254                 }
255         }
256         return 0;
257 }
258
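/* Return the number of Rx queues configured for a port; queue ids must start at
 * 0 and be consecutive.
 */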
259 static uint8_t
260 get_port_n_rx_queues(const uint16_t port)
261 {
262         int queue = -1;
263         uint16_t i;
264
265         for (i = 0; i < nb_lcore_params; ++i) {
266                 if (lcore_params[i].port_id == port) {
267                         if (lcore_params[i].queue_id == queue+1)
268                                 queue = lcore_params[i].queue_id;
269                         else
270                                 rte_exit(EXIT_FAILURE, "queue ids of the port %d must be"
271                                                 " in sequence and must start with 0\n",
272                                                 lcore_params[i].port_id);
273                 }
274         }
275         return (uint8_t)(++queue);
276 }
277
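/* Populate each lcore's rx_queue_list from the (port, queue, lcore) tuples. */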
278 static int
279 init_lcore_rx_queues(void)
280 {
281         uint16_t i, nb_rx_queue;
282         uint8_t lcore;
283
284         for (i = 0; i < nb_lcore_params; ++i) {
285                 lcore = lcore_params[i].lcore_id;
286                 nb_rx_queue = lcore_conf[lcore].n_rx_queue;
287                 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
288                         printf("error: too many queues (%u) for lcore: %u\n",
289                                 (unsigned)nb_rx_queue + 1, (unsigned)lcore);
290                         return -1;
291                 } else {
292                         lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
293                                 lcore_params[i].port_id;
294                         lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
295                                 lcore_params[i].queue_id;
296                         lcore_conf[lcore].n_rx_queue++;
297                 }
298         }
299         return 0;
300 }
301
302 /* display usage */
303 static void
304 print_usage(const char *prgname)
305 {
306         fprintf(stderr, "%s [EAL options] --"
307                 " -p PORTMASK"
308                 " [-P]"
309                 " [-E]"
310                 " [-L]"
311                 " --config (port,queue,lcore)[,(port,queue,lcore)]"
312                 " [--eth-dest=X,MM:MM:MM:MM:MM:MM]"
313                 " [--enable-jumbo [--max-pkt-len PKTLEN]]"
314                 " [--no-numa]"
315                 " [--hash-entry-num]"
316                 " [--ipv6]"
317                 " [--parse-ptype]"
318                 " [--per-port-pool]"
319                 " [--mode]"
320                 " [--eventq-sched]\n\n"
321
322                 "  -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
323                 "  -P : Enable promiscuous mode\n"
324                 "  -E : Enable exact match\n"
325                 "  -L : Enable longest prefix match (default)\n"
326                 "  --config (port,queue,lcore): Rx queue configuration\n"
327                 "  --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for port X\n"
328                 "  --enable-jumbo: Enable jumbo frames\n"
329                 "  --max-pkt-len: Maximum packet length in decimal (64-9600);\n"
330                 "                 valid only when jumbo frames are enabled\n"
331                 "  --no-numa: Disable numa awareness\n"
332                 "  --hash-entry-num: Specify the hash entry number in hexadecimal to be setup\n"
333                 "  --ipv6: Set if running ipv6 packets\n"
334                 "  --parse-ptype: Set to use software to analyze packet type\n"
335                 "  --per-port-pool: Use separate buffer pool per port\n"
336                 "  --mode: Packet transfer mode for I/O, poll or eventdev\n"
337                 "          Default mode = poll\n"
338                 "  --eventq-sched: Event queue synchronization method\n"
339                 "                  ordered, atomic or parallel.\n"
340                 "                  Default: atomic\n"
341                 "                  Valid only if --mode=eventdev\n"
342                 "  --event-eth-rxqs: Number of ethernet RX queues per device.\n"
343                 "                    Default: 1\n"
344                 "                    Valid only if --mode=eventdev\n\n",
345                 prgname);
346 }
347
348 static int
349 parse_max_pkt_len(const char *pktlen)
350 {
351         char *end = NULL;
352         unsigned long len;
353
354         /* parse decimal string */
355         len = strtoul(pktlen, &end, 10);
356         if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
357                 return -1;
358
359         if (len == 0)
360                 return -1;
361
362         return len;
363 }
364
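/* Parse the hexadecimal port mask; returns 0 on error, which the caller rejects. */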
365 static int
366 parse_portmask(const char *portmask)
367 {
368         char *end = NULL;
369         unsigned long pm;
370
371         /* parse hexadecimal string */
372         pm = strtoul(portmask, &end, 16);
373         if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
374                 return 0;
375
376         return pm;
377 }
378
379 static int
380 parse_hash_entry_number(const char *hash_entry_num)
381 {
382         char *end = NULL;
383         unsigned long hash_en;
384         /* parse hexadecimal string */
385         hash_en = strtoul(hash_entry_num, &end, 16);
386         if ((hash_entry_num[0] == '\0') || (end == NULL) || (*end != '\0'))
387                 return -1;
388
389         if (hash_en == 0)
390                 return -1;
391
392         return hash_en;
393 }
394
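/* Parse the --config option, a list of (port,queue,lcore) tuples, into
 * lcore_params_array.
 */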
395 static int
396 parse_config(const char *q_arg)
397 {
398         char s[256];
399         const char *p, *p0 = q_arg;
400         char *end;
401         enum fieldnames {
402                 FLD_PORT = 0,
403                 FLD_QUEUE,
404                 FLD_LCORE,
405                 _NUM_FLD
406         };
407         unsigned long int_fld[_NUM_FLD];
408         char *str_fld[_NUM_FLD];
409         int i;
410         unsigned size;
411
412         nb_lcore_params = 0;
413
414         while ((p = strchr(p0,'(')) != NULL) {
415                 ++p;
416                 if((p0 = strchr(p,')')) == NULL)
417                         return -1;
418
419                 size = p0 - p;
420                 if(size >= sizeof(s))
421                         return -1;
422
423                 snprintf(s, sizeof(s), "%.*s", size, p);
424                 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
425                         return -1;
426                 for (i = 0; i < _NUM_FLD; i++){
427                         errno = 0;
428                         int_fld[i] = strtoul(str_fld[i], &end, 0);
429                         if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
430                                 return -1;
431                 }
432                 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
433                         printf("exceeded max number of lcore params: %hu\n",
434                                 nb_lcore_params);
435                         return -1;
436                 }
437                 lcore_params_array[nb_lcore_params].port_id =
438                         (uint8_t)int_fld[FLD_PORT];
439                 lcore_params_array[nb_lcore_params].queue_id =
440                         (uint8_t)int_fld[FLD_QUEUE];
441                 lcore_params_array[nb_lcore_params].lcore_id =
442                         (uint8_t)int_fld[FLD_LCORE];
443                 ++nb_lcore_params;
444         }
445         lcore_params = lcore_params_array;
446         return 0;
447 }
448
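/* Parse --eth-dest=port,MM:MM:MM:MM:MM:MM and store the destination MAC for that port. */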
449 static void
450 parse_eth_dest(const char *optarg)
451 {
452         uint16_t portid;
453         char *port_end;
454         uint8_t c, *dest, peer_addr[6];
455
456         errno = 0;
457         portid = strtoul(optarg, &port_end, 10);
458         if (errno != 0 || port_end == optarg || *port_end++ != ',')
459                 rte_exit(EXIT_FAILURE,
460                 "Invalid eth-dest: %s", optarg);
461         if (portid >= RTE_MAX_ETHPORTS)
462                 rte_exit(EXIT_FAILURE,
463                 "eth-dest: port %d >= RTE_MAX_ETHPORTS(%d)\n",
464                 portid, RTE_MAX_ETHPORTS);
465
466         if (cmdline_parse_etheraddr(NULL, port_end,
467                 &peer_addr, sizeof(peer_addr)) < 0)
468                 rte_exit(EXIT_FAILURE,
469                 "Invalid ethernet address: %s\n",
470                 port_end);
471         dest = (uint8_t *)&dest_eth_addr[portid];
472         for (c = 0; c < 6; c++)
473                 dest[c] = peer_addr[c];
474         *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
475 }
476
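/* Select the packet I/O mode: poll (default) or eventdev. */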
477 static void
478 parse_mode(const char *optarg)
479 {
480         struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
481
482         if (!strcmp(optarg, "poll"))
483                 evt_rsrc->enabled = false;
484         else if (!strcmp(optarg, "eventdev"))
485                 evt_rsrc->enabled = true;
486 }
487
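/* Select the event queue scheduling type: ordered, atomic or parallel. */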
488 static void
489 parse_eventq_sched(const char *optarg)
490 {
491         struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
492
493         if (!strcmp(optarg, "ordered"))
494                 evt_rsrc->sched_type = RTE_SCHED_TYPE_ORDERED;
495         if (!strcmp(optarg, "atomic"))
496                 evt_rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
497         if (!strcmp(optarg, "parallel"))
498                 evt_rsrc->sched_type = RTE_SCHED_TYPE_PARALLEL;
499 }
500
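/* Parse the number of ethernet Rx queues per device used in event mode. */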
501 static void
502 parse_event_eth_rx_queues(const char *eth_rx_queues)
503 {
504         struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
505         char *end = NULL;
506         uint8_t num_eth_rx_queues;
507
508         /* parse decimal string */
509         num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10);
510         if ((eth_rx_queues[0] == '\0') || (end == NULL) || (*end != '\0'))
511                 return;
512
513         if (num_eth_rx_queues == 0)
514                 return;
515
516         evt_rsrc->eth_rx_queues = num_eth_rx_queues;
517 }
518
519 #define MAX_JUMBO_PKT_LEN  9600
520
521 static const char short_options[] =
522         "p:"  /* portmask */
523         "P"   /* promiscuous */
524         "L"   /* enable long prefix match */
525         "E"   /* enable exact match */
526         ;
527
528 #define CMD_LINE_OPT_CONFIG "config"
529 #define CMD_LINE_OPT_ETH_DEST "eth-dest"
530 #define CMD_LINE_OPT_NO_NUMA "no-numa"
531 #define CMD_LINE_OPT_IPV6 "ipv6"
532 #define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
533 #define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
534 #define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
535 #define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
536 #define CMD_LINE_OPT_MODE "mode"
537 #define CMD_LINE_OPT_EVENTQ_SYNC "eventq-sched"
538 #define CMD_LINE_OPT_EVENT_ETH_RX_QUEUES "event-eth-rxqs"
539 enum {
540         /* long options mapped to a short option */
541
542         /* first long-only option value must be >= 256, so that we won't
543          * conflict with short options */
544         CMD_LINE_OPT_MIN_NUM = 256,
545         CMD_LINE_OPT_CONFIG_NUM,
546         CMD_LINE_OPT_ETH_DEST_NUM,
547         CMD_LINE_OPT_NO_NUMA_NUM,
548         CMD_LINE_OPT_IPV6_NUM,
549         CMD_LINE_OPT_ENABLE_JUMBO_NUM,
550         CMD_LINE_OPT_HASH_ENTRY_NUM_NUM,
551         CMD_LINE_OPT_PARSE_PTYPE_NUM,
552         CMD_LINE_OPT_PARSE_PER_PORT_POOL,
553         CMD_LINE_OPT_MODE_NUM,
554         CMD_LINE_OPT_EVENTQ_SYNC_NUM,
555         CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM,
556 };
557
558 static const struct option lgopts[] = {
559         {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
560         {CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
561         {CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
562         {CMD_LINE_OPT_IPV6, 0, 0, CMD_LINE_OPT_IPV6_NUM},
563         {CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, CMD_LINE_OPT_ENABLE_JUMBO_NUM},
564         {CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM},
565         {CMD_LINE_OPT_PARSE_PTYPE, 0, 0, CMD_LINE_OPT_PARSE_PTYPE_NUM},
566         {CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PARSE_PER_PORT_POOL},
567         {CMD_LINE_OPT_MODE, 1, 0, CMD_LINE_OPT_MODE_NUM},
568         {CMD_LINE_OPT_EVENTQ_SYNC, 1, 0, CMD_LINE_OPT_EVENTQ_SYNC_NUM},
569         {CMD_LINE_OPT_EVENT_ETH_RX_QUEUES, 1, 0,
570                                         CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM},
571         {NULL, 0, 0, 0}
572 };
573
574 /*
575  * This expression is used to calculate the number of mbufs needed
576  * depending on user input, taking into account memory for rx and
577  * tx hardware rings, cache per lcore and mtable per port per lcore.
578  * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum
579  * value of 8192
580  */
581 #define NB_MBUF(nports) RTE_MAX(        \
582         (nports*nb_rx_queue*nb_rxd +            \
583         nports*nb_lcores*MAX_PKT_BURST +        \
584         nports*n_tx_queue*nb_txd +              \
585         nb_lcores*MEMPOOL_CACHE_SIZE),          \
586         (unsigned)8192)
587
588 /* Parse the argument given in the command line of the application */
589 static int
590 parse_args(int argc, char **argv)
591 {
592         int opt, ret;
593         char **argvopt;
594         int option_index;
595         char *prgname = argv[0];
596         uint8_t lcore_params = 0;
597         uint8_t eventq_sched = 0;
598         uint8_t eth_rx_q = 0;
599         struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
600
601         argvopt = argv;
602
603         /* Error or normal output strings. */
604         while ((opt = getopt_long(argc, argvopt, short_options,
605                                 lgopts, &option_index)) != EOF) {
606
607                 switch (opt) {
608                 /* portmask */
609                 case 'p':
610                         enabled_port_mask = parse_portmask(optarg);
611                         if (enabled_port_mask == 0) {
612                                 fprintf(stderr, "Invalid portmask\n");
613                                 print_usage(prgname);
614                                 return -1;
615                         }
616                         break;
617
618                 case 'P':
619                         promiscuous_on = 1;
620                         break;
621
622                 case 'E':
623                         l3fwd_em_on = 1;
624                         break;
625
626                 case 'L':
627                         l3fwd_lpm_on = 1;
628                         break;
629
630                 /* long options */
631                 case CMD_LINE_OPT_CONFIG_NUM:
632                         ret = parse_config(optarg);
633                         if (ret) {
634                                 fprintf(stderr, "Invalid config\n");
635                                 print_usage(prgname);
636                                 return -1;
637                         }
638                         lcore_params = 1;
639                         break;
640
641                 case CMD_LINE_OPT_ETH_DEST_NUM:
642                         parse_eth_dest(optarg);
643                         break;
644
645                 case CMD_LINE_OPT_NO_NUMA_NUM:
646                         numa_on = 0;
647                         break;
648
649                 case CMD_LINE_OPT_IPV6_NUM:
650                         ipv6 = 1;
651                         break;
652
653                 case CMD_LINE_OPT_ENABLE_JUMBO_NUM: {
654                         const struct option lenopts = {
655                                 "max-pkt-len", required_argument, 0, 0
656                         };
657
658                         port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
659                         port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
660
661                         /*
662                          * if no max-pkt-len set, use the default
663                          * value RTE_ETHER_MAX_LEN.
664                          */
665                         if (getopt_long(argc, argvopt, "",
666                                         &lenopts, &option_index) == 0) {
667                                 ret = parse_max_pkt_len(optarg);
668                                 if (ret < 64 || ret > MAX_JUMBO_PKT_LEN) {
669                                         fprintf(stderr,
670                                                 "invalid maximum packet length\n");
671                                         print_usage(prgname);
672                                         return -1;
673                                 }
674                                 port_conf.rxmode.max_rx_pkt_len = ret;
675                         }
676                         break;
677                 }
678
679                 case CMD_LINE_OPT_HASH_ENTRY_NUM_NUM:
680                         ret = parse_hash_entry_number(optarg);
681                         if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) {
682                                 hash_entry_number = ret;
683                         } else {
684                                 fprintf(stderr, "invalid hash entry number\n");
685                                 print_usage(prgname);
686                                 return -1;
687                         }
688                         break;
689
690                 case CMD_LINE_OPT_PARSE_PTYPE_NUM:
691                         printf("soft parse-ptype is enabled\n");
692                         parse_ptype = 1;
693                         break;
694
695                 case CMD_LINE_OPT_PARSE_PER_PORT_POOL:
696                         printf("per port buffer pool is enabled\n");
697                         per_port_pool = 1;
698                         break;
699
700                 case CMD_LINE_OPT_MODE_NUM:
701                         parse_mode(optarg);
702                         break;
703
704                 case CMD_LINE_OPT_EVENTQ_SYNC_NUM:
705                         parse_eventq_sched(optarg);
706                         eventq_sched = 1;
707                         break;
708
709                 case CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM:
710                         parse_event_eth_rx_queues(optarg);
711                         eth_rx_q = 1;
712                         break;
713
714                 default:
715                         print_usage(prgname);
716                         return -1;
717                 }
718         }
719
720         /* If both LPM and EM are selected, return error. */
721         if (l3fwd_lpm_on && l3fwd_em_on) {
722                 fprintf(stderr, "LPM and EM are mutually exclusive, select only one\n");
723                 return -1;
724         }
725
726         if (evt_rsrc->enabled && lcore_params) {
727                 fprintf(stderr, "lcore config is not valid when event mode is selected\n");
728                 return -1;
729         }
730
731         if (!evt_rsrc->enabled && eth_rx_q) {
732                 fprintf(stderr, "eth_rx_queues is valid only when event mode is selected\n");
733                 return -1;
734         }
735
736         if (!evt_rsrc->enabled && eventq_sched) {
737                 fprintf(stderr, "eventq_sched is valid only when event mode is selected\n");
738                 return -1;
739         }
740
741         /*
742          * If nothing is selected, pick longest-prefix match
743          * as the default.
744          */
745         if (!l3fwd_lpm_on && !l3fwd_em_on) {
746                 fprintf(stderr, "Neither LPM nor EM selected, defaulting to LPM\n");
747                 l3fwd_lpm_on = 1;
748         }
749
750         /*
751          * ipv6 and hash flags are valid only for
752          * exact match; reset them to defaults for
753          * longest-prefix match.
754          */
755         if (l3fwd_lpm_on) {
756                 ipv6 = 0;
757                 hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
758         }
759
760         if (optind >= 0)
761                 argv[optind-1] = prgname;
762
763         ret = optind-1;
764         optind = 1; /* reset getopt lib */
765         return ret;
766 }
767
768 static void
769 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
770 {
771         char buf[RTE_ETHER_ADDR_FMT_SIZE];
772         rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
773         printf("%s%s", name, buf);
774 }
775
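/* Create the mbuf pool for the given port on each socket in use (port 0 stands for
 * the shared pool when per-port pools are disabled), run the lookup table setup
 * once per socket and record the per-socket lookup structs in each lcore's
 * configuration.
 */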
776 int
777 init_mem(uint16_t portid, unsigned int nb_mbuf)
778 {
779         struct lcore_conf *qconf;
780         int socketid;
781         unsigned lcore_id;
782         char s[64];
783
784         for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
785                 if (rte_lcore_is_enabled(lcore_id) == 0)
786                         continue;
787
788                 if (numa_on)
789                         socketid = rte_lcore_to_socket_id(lcore_id);
790                 else
791                         socketid = 0;
792
793                 if (socketid >= NB_SOCKETS) {
794                         rte_exit(EXIT_FAILURE,
795                                 "Socket %d of lcore %u is out of range %d\n",
796                                 socketid, lcore_id, NB_SOCKETS);
797                 }
798
799                 if (pktmbuf_pool[portid][socketid] == NULL) {
800                         snprintf(s, sizeof(s), "mbuf_pool_%d:%d",
801                                  portid, socketid);
802                         pktmbuf_pool[portid][socketid] =
803                                 rte_pktmbuf_pool_create(s, nb_mbuf,
804                                         MEMPOOL_CACHE_SIZE, 0,
805                                         RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
806                         if (pktmbuf_pool[portid][socketid] == NULL)
807                                 rte_exit(EXIT_FAILURE,
808                                         "Cannot init mbuf pool on socket %d\n",
809                                         socketid);
810                         else
811                                 printf("Allocated mbuf pool on socket %d\n",
812                                         socketid);
813
814                         /* Set up either LPM or EM (i.e. hash) lookup,
815                          * but only once per available socket.
816                          */
817                         if (!lkp_per_socket[socketid]) {
818                                 l3fwd_lkp.setup(socketid);
819                                 lkp_per_socket[socketid] = 1;
820                         }
821                 }
822                 qconf = &lcore_conf[lcore_id];
823                 qconf->ipv4_lookup_struct =
824                         l3fwd_lkp.get_ipv4_lookup_struct(socketid);
825                 qconf->ipv6_lookup_struct =
826                         l3fwd_lkp.get_ipv6_lookup_struct(socketid);
827         }
828         return 0;
829 }
830
831 /* Check the link status of all ports for up to 9s, then print it */
832 static void
833 check_all_ports_link_status(uint32_t port_mask)
834 {
835 #define CHECK_INTERVAL 100 /* 100ms */
836 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
837         uint16_t portid;
838         uint8_t count, all_ports_up, print_flag = 0;
839         struct rte_eth_link link;
840         int ret;
841         char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
842
843         printf("\nChecking link status");
844         fflush(stdout);
845         for (count = 0; count <= MAX_CHECK_TIME; count++) {
846                 if (force_quit)
847                         return;
848                 all_ports_up = 1;
849                 RTE_ETH_FOREACH_DEV(portid) {
850                         if (force_quit)
851                                 return;
852                         if ((port_mask & (1 << portid)) == 0)
853                                 continue;
854                         memset(&link, 0, sizeof(link));
855                         ret = rte_eth_link_get_nowait(portid, &link);
856                         if (ret < 0) {
857                                 all_ports_up = 0;
858                                 if (print_flag == 1)
859                                         printf("Port %u link get failed: %s\n",
860                                                 portid, rte_strerror(-ret));
861                                 continue;
862                         }
863                         /* print link status if flag set */
864                         if (print_flag == 1) {
865                                 rte_eth_link_to_str(link_status_text,
866                                         sizeof(link_status_text), &link);
867                                 printf("Port %d %s\n", portid,
868                                        link_status_text);
869                                 continue;
870                         }
871                         /* clear all_ports_up flag if any link down */
872                         if (link.link_status == ETH_LINK_DOWN) {
873                                 all_ports_up = 0;
874                                 break;
875                         }
876                 }
877                 /* after finally printing all link status, get out */
878                 if (print_flag == 1)
879                         break;
880
881                 if (all_ports_up == 0) {
882                         printf(".");
883                         fflush(stdout);
884                         rte_delay_ms(CHECK_INTERVAL);
885                 }
886
887                 /* set the print_flag if all ports up or timeout */
888                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
889                         print_flag = 1;
890                         printf("done\n");
891                 }
892         }
893 }
894
895 static void
896 signal_handler(int signum)
897 {
898         if (signum == SIGINT || signum == SIGTERM) {
899                 printf("\n\nSignal %d received, preparing to exit...\n",
900                                 signum);
901                 force_quit = true;
902         }
903 }
904
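/* Register the software ptype parsing Rx callback when --parse-ptype is given,
 * otherwise verify that the port can classify the required packet types itself.
 * Returns 1 on success, 0 on failure.
 */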
905 static int
906 prepare_ptype_parser(uint16_t portid, uint16_t queueid)
907 {
908         if (parse_ptype) {
909                 printf("Port %d: software packet type parsing enabled\n", portid);
910                 if (rte_eth_add_rx_callback(portid, queueid,
911                                             l3fwd_lkp.cb_parse_ptype,
912                                             NULL))
913                         return 1;
914
915                 printf("Failed to add rx callback: port=%d\n", portid);
916                 return 0;
917         }
918
919         if (l3fwd_lkp.check_ptype(portid))
920                 return 1;
921
922         printf("port %d cannot parse packet type, please add --%s\n",
923                portid, CMD_LINE_OPT_PARSE_PTYPE);
924         return 0;
925 }
926
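/* Poll-mode setup: validate the lcore/port configuration, configure every
 * enabled port, create the mbuf pools and set up one Tx queue per (lcore, port)
 * plus the Rx queues requested via --config.
 */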
927 static void
928 l3fwd_poll_resource_setup(void)
929 {
930         uint8_t nb_rx_queue, queue, socketid;
931         struct rte_eth_dev_info dev_info;
932         uint32_t n_tx_queue, nb_lcores;
933         struct rte_eth_txconf *txconf;
934         struct lcore_conf *qconf;
935         uint16_t queueid, portid;
936         unsigned int nb_ports;
937         unsigned int lcore_id;
938         int ret;
939
940         if (check_lcore_params() < 0)
941                 rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
942
943         ret = init_lcore_rx_queues();
944         if (ret < 0)
945                 rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
946
947         nb_ports = rte_eth_dev_count_avail();
948
949         if (check_port_config() < 0)
950                 rte_exit(EXIT_FAILURE, "check_port_config failed\n");
951
952         nb_lcores = rte_lcore_count();
953
954         /* initialize all ports */
955         RTE_ETH_FOREACH_DEV(portid) {
956                 struct rte_eth_conf local_port_conf = port_conf;
957
958                 /* skip ports that are not enabled */
959                 if ((enabled_port_mask & (1 << portid)) == 0) {
960                         printf("\nSkipping disabled port %d\n", portid);
961                         continue;
962                 }
963
964                 /* init port */
965                 printf("Initializing port %d ... ", portid );
966                 fflush(stdout);
967
968                 nb_rx_queue = get_port_n_rx_queues(portid);
969                 n_tx_queue = nb_lcores;
970                 if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
971                         n_tx_queue = MAX_TX_QUEUE_PER_PORT;
972                 printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
973                         nb_rx_queue, (unsigned)n_tx_queue );
974
975                 ret = rte_eth_dev_info_get(portid, &dev_info);
976                 if (ret != 0)
977                         rte_exit(EXIT_FAILURE,
978                                 "Error during getting device (port %u) info: %s\n",
979                                 portid, strerror(-ret));
980
981                 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
982                         local_port_conf.txmode.offloads |=
983                                 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
984
985                 local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
986                         dev_info.flow_type_rss_offloads;
987                 if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
988                                 port_conf.rx_adv_conf.rss_conf.rss_hf) {
989                         printf("Port %u modified RSS hash function based on hardware support,"
990                                 "requested:%#"PRIx64" configured:%#"PRIx64"\n",
991                                 portid,
992                                 port_conf.rx_adv_conf.rss_conf.rss_hf,
993                                 local_port_conf.rx_adv_conf.rss_conf.rss_hf);
994                 }
995
996                 ret = rte_eth_dev_configure(portid, nb_rx_queue,
997                                         (uint16_t)n_tx_queue, &local_port_conf);
998                 if (ret < 0)
999                         rte_exit(EXIT_FAILURE,
1000                                 "Cannot configure device: err=%d, port=%d\n",
1001                                 ret, portid);
1002
1003                 ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
1004                                                        &nb_txd);
1005                 if (ret < 0)
1006                         rte_exit(EXIT_FAILURE,
1007                                  "Cannot adjust number of descriptors: err=%d, "
1008                                  "port=%d\n", ret, portid);
1009
1010                 ret = rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
1011                 if (ret < 0)
1012                         rte_exit(EXIT_FAILURE,
1013                                  "Cannot get MAC address: err=%d, port=%d\n",
1014                                  ret, portid);
1015
1016                 print_ethaddr(" Address:", &ports_eth_addr[portid]);
1017                 printf(", ");
1018                 print_ethaddr("Destination:",
1019                         (const struct rte_ether_addr *)&dest_eth_addr[portid]);
1020                 printf(", ");
1021
1022                 /*
1023                  * prepare src MACs for each port.
1024                  */
1025                 rte_ether_addr_copy(&ports_eth_addr[portid],
1026                         (struct rte_ether_addr *)(val_eth + portid) + 1);
1027
1028                 /* init memory */
1029                 if (!per_port_pool) {
1030                         /* portid = 0; this is *not* signifying the first port,
1031                          * rather, it signifies that portid is ignored.
1032                          */
1033                         ret = init_mem(0, NB_MBUF(nb_ports));
1034                 } else {
1035                         ret = init_mem(portid, NB_MBUF(1));
1036                 }
1037                 if (ret < 0)
1038                         rte_exit(EXIT_FAILURE, "init_mem failed\n");
1039
1040                 /* init one TX queue per couple (lcore,port) */
1041                 queueid = 0;
1042                 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1043                         if (rte_lcore_is_enabled(lcore_id) == 0)
1044                                 continue;
1045
1046                         if (numa_on)
1047                                 socketid =
1048                                 (uint8_t)rte_lcore_to_socket_id(lcore_id);
1049                         else
1050                                 socketid = 0;
1051
1052                         printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
1053                         fflush(stdout);
1054
1055                         txconf = &dev_info.default_txconf;
1056                         txconf->offloads = local_port_conf.txmode.offloads;
1057                         ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
1058                                                      socketid, txconf);
1059                         if (ret < 0)
1060                                 rte_exit(EXIT_FAILURE,
1061                                         "rte_eth_tx_queue_setup: err=%d, "
1062                                         "port=%d\n", ret, portid);
1063
1064                         qconf = &lcore_conf[lcore_id];
1065                         qconf->tx_queue_id[portid] = queueid;
1066                         queueid++;
1067
1068                         qconf->tx_port_id[qconf->n_tx_port] = portid;
1069                         qconf->n_tx_port++;
1070                 }
1071                 printf("\n");
1072         }
1073
1074         for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1075                 if (rte_lcore_is_enabled(lcore_id) == 0)
1076                         continue;
1077                 qconf = &lcore_conf[lcore_id];
1078                 printf("\nInitializing rx queues on lcore %u ... ", lcore_id );
1079                 fflush(stdout);
1080                 /* init RX queues */
1081                 for(queue = 0; queue < qconf->n_rx_queue; ++queue) {
1082                         struct rte_eth_rxconf rxq_conf;
1083
1084                         portid = qconf->rx_queue_list[queue].port_id;
1085                         queueid = qconf->rx_queue_list[queue].queue_id;
1086
1087                         if (numa_on)
1088                                 socketid =
1089                                 (uint8_t)rte_lcore_to_socket_id(lcore_id);
1090                         else
1091                                 socketid = 0;
1092
1093                         printf("rxq=%d,%d,%d ", portid, queueid, socketid);
1094                         fflush(stdout);
1095
1096                         ret = rte_eth_dev_info_get(portid, &dev_info);
1097                         if (ret != 0)
1098                                 rte_exit(EXIT_FAILURE,
1099                                         "Error during getting device (port %u) info: %s\n",
1100                                         portid, strerror(-ret));
1101
1102                         rxq_conf = dev_info.default_rxconf;
1103                         rxq_conf.offloads = port_conf.rxmode.offloads;
1104                         if (!per_port_pool)
1105                                 ret = rte_eth_rx_queue_setup(portid, queueid,
1106                                                 nb_rxd, socketid,
1107                                                 &rxq_conf,
1108                                                 pktmbuf_pool[0][socketid]);
1109                         else
1110                                 ret = rte_eth_rx_queue_setup(portid, queueid,
1111                                                 nb_rxd, socketid,
1112                                                 &rxq_conf,
1113                                                 pktmbuf_pool[portid][socketid]);
1114                         if (ret < 0)
1115                                 rte_exit(EXIT_FAILURE,
1116                                 "rte_eth_rx_queue_setup: err=%d, port=%d\n",
1117                                 ret, portid);
1118                 }
1119         }
1120 }
1121
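/* Map the given service to the service lcore running the fewest services and
 * start that lcore.
 */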
1122 static inline int
1123 l3fwd_service_enable(uint32_t service_id)
1124 {
1125         uint8_t min_service_count = UINT8_MAX;
1126         uint32_t slcore_array[RTE_MAX_LCORE];
1127         unsigned int slcore = 0;
1128         uint8_t service_count;
1129         int32_t slcore_count;
1130
1131         if (!rte_service_lcore_count())
1132                 return -ENOENT;
1133
1134         slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE);
1135         if (slcore_count < 0)
1136                 return -ENOENT;
1137         /* Get the core which has the fewest services running. */
1138         while (slcore_count--) {
1139                 /* Reset default mapping */
1140                 if (rte_service_map_lcore_set(service_id,
1141                                 slcore_array[slcore_count], 0) != 0)
1142                         return -ENOENT;
1143                 service_count = rte_service_lcore_count_services(
1144                                 slcore_array[slcore_count]);
1145                 if (service_count < min_service_count) {
1146                         slcore = slcore_array[slcore_count];
1147                         min_service_count = service_count;
1148                 }
1149         }
1150         if (rte_service_map_lcore_set(service_id, slcore, 1))
1151                 return -ENOENT;
1152         rte_service_lcore_start(slcore);
1153
1154         return 0;
1155 }
1156
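/* Attach the eventdev scheduler (when the device lacks distributed scheduling)
 * and the Rx/Tx adapter services, if any, to service lcores.
 */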
1157 static void
1158 l3fwd_event_service_setup(void)
1159 {
1160         struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
1161         struct rte_event_dev_info evdev_info;
1162         uint32_t service_id, caps;
1163         int ret, i;
1164
1165         rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
1166         if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
1167                 ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
1168                                 &service_id);
1169                 if (ret != -ESRCH && ret != 0)
1170                         rte_exit(EXIT_FAILURE,
1171                                  "Error in starting eventdev service\n");
1172                 l3fwd_service_enable(service_id);
1173         }
1174
1175         for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
1176                 ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
1177                                 evt_rsrc->rx_adptr.rx_adptr[i], &caps);
1178                 if (ret < 0)
1179                         rte_exit(EXIT_FAILURE,
1180                                  "Failed to get Rx adapter[%d] caps\n",
1181                                  evt_rsrc->rx_adptr.rx_adptr[i]);
1182                 ret = rte_event_eth_rx_adapter_service_id_get(
1183                                 evt_rsrc->event_d_id,
1184                                 &service_id);
1185                 if (ret != -ESRCH && ret != 0)
1186                         rte_exit(EXIT_FAILURE,
1187                                  "Error in starting Rx adapter[%d] service\n",
1188                                  evt_rsrc->rx_adptr.rx_adptr[i]);
1189                 l3fwd_service_enable(service_id);
1190         }
1191
1192         for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
1193                 ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
1194                                 evt_rsrc->tx_adptr.tx_adptr[i], &caps);
1195                 if (ret < 0)
1196                         rte_exit(EXIT_FAILURE,
1197                                  "Failed to get Tx adapter[%d] caps\n",
1198                                  evt_rsrc->tx_adptr.tx_adptr[i]);
1199                 ret = rte_event_eth_tx_adapter_service_id_get(
1200                                 evt_rsrc->event_d_id,
1201                                 &service_id);
1202                 if (ret != -ESRCH && ret != 0)
1203                         rte_exit(EXIT_FAILURE,
1204                                  "Error in starting Tx adapter[%d] service\n",
1205                                  evt_rsrc->tx_adptr.tx_adptr[i]);
1206                 l3fwd_service_enable(service_id);
1207         }
1208 }
1209
1210 int
1211 main(int argc, char **argv)
1212 {
1213         struct l3fwd_event_resources *evt_rsrc;
1214         struct lcore_conf *qconf;
1215         uint16_t queueid, portid;
1216         unsigned int lcore_id;
1217         uint8_t queue;
1218         int i, ret;
1219
1220         /* init EAL */
1221         ret = rte_eal_init(argc, argv);
1222         if (ret < 0)
1223                 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
1224         argc -= ret;
1225         argv += ret;
1226
1227         force_quit = false;
1228         signal(SIGINT, signal_handler);
1229         signal(SIGTERM, signal_handler);
1230
1231         /* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
1232         for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
1233                 dest_eth_addr[portid] =
1234                         RTE_ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
1235                 *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
1236         }
1237
1238         evt_rsrc = l3fwd_get_eventdev_rsrc();
1239         /* parse application arguments (after the EAL ones) */
1240         ret = parse_args(argc, argv);
1241         if (ret < 0)
1242                 rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
1243
1244         /* Setup function pointers for lookup method. */
1245         setup_l3fwd_lookup_tables();
1246
1247         evt_rsrc->per_port_pool = per_port_pool;
1248         evt_rsrc->pkt_pool = pktmbuf_pool;
1249         evt_rsrc->port_mask = enabled_port_mask;
1250         /* Configure eventdev parameters if user has requested */
1251         if (evt_rsrc->enabled) {
1252                 l3fwd_event_resource_setup(&port_conf);
1253                 if (l3fwd_em_on)
1254                         l3fwd_lkp.main_loop = evt_rsrc->ops.em_event_loop;
1255                 else
1256                         l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop;
1257                 l3fwd_event_service_setup();
1258         } else
1259                 l3fwd_poll_resource_setup();
1260
1261         /* start ports */
1262         RTE_ETH_FOREACH_DEV(portid) {
1263                 if ((enabled_port_mask & (1 << portid)) == 0) {
1264                         continue;
1265                 }
1266                 /* Start device */
1267                 ret = rte_eth_dev_start(portid);
1268                 if (ret < 0)
1269                         rte_exit(EXIT_FAILURE,
1270                                 "rte_eth_dev_start: err=%d, port=%d\n",
1271                                 ret, portid);
1272
1273                 /*
1274                  * If enabled, put device in promiscuous mode.
1275                  * This allows IO forwarding mode to forward packets
1276                  * to itself through 2 cross-connected ports of the
1277                  * target machine.
1278                  */
1279                 if (promiscuous_on) {
1280                         ret = rte_eth_promiscuous_enable(portid);
1281                         if (ret != 0)
1282                                 rte_exit(EXIT_FAILURE,
1283                                         "rte_eth_promiscuous_enable: err=%s, port=%u\n",
1284                                         rte_strerror(-ret), portid);
1285                 }
1286         }
1287
1288         printf("\n");
1289
1290         for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1291                 if (rte_lcore_is_enabled(lcore_id) == 0)
1292                         continue;
1293                 qconf = &lcore_conf[lcore_id];
1294                 for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
1295                         portid = qconf->rx_queue_list[queue].port_id;
1296                         queueid = qconf->rx_queue_list[queue].queue_id;
1297                         if (prepare_ptype_parser(portid, queueid) == 0)
1298                                 rte_exit(EXIT_FAILURE, "ptype check fails\n");
1299                 }
1300         }
1301
1302         check_all_ports_link_status(enabled_port_mask);
1303
1304         ret = 0;
1305         /* launch per-lcore init on every lcore */
1306         rte_eal_mp_remote_launch(l3fwd_lkp.main_loop, NULL, CALL_MAIN);
1307         if (evt_rsrc->enabled) {
1308                 for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++)
1309                         rte_event_eth_rx_adapter_stop(
1310                                         evt_rsrc->rx_adptr.rx_adptr[i]);
1311                 for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++)
1312                         rte_event_eth_tx_adapter_stop(
1313                                         evt_rsrc->tx_adptr.tx_adptr[i]);
1314
1315                 RTE_ETH_FOREACH_DEV(portid) {
1316                         if ((enabled_port_mask & (1 << portid)) == 0)
1317                                 continue;
1318                         ret = rte_eth_dev_stop(portid);
1319                         if (ret != 0)
1320                                 printf("rte_eth_dev_stop: err=%d, port=%u\n",
1321                                        ret, portid);
1322                 }
1323
1324                 rte_eal_mp_wait_lcore();
1325                 RTE_ETH_FOREACH_DEV(portid) {
1326                         if ((enabled_port_mask & (1 << portid)) == 0)
1327                                 continue;
1328                         rte_eth_dev_close(portid);
1329                 }
1330
1331                 rte_event_dev_stop(evt_rsrc->event_d_id);
1332                 rte_event_dev_close(evt_rsrc->event_d_id);
1333
1334         } else {
1335                 rte_eal_mp_wait_lcore();
1336
1337                 RTE_ETH_FOREACH_DEV(portid) {
1338                         if ((enabled_port_mask & (1 << portid)) == 0)
1339                                 continue;
1340                         printf("Closing port %d...", portid);
1341                         ret = rte_eth_dev_stop(portid);
1342                         if (ret != 0)
1343                                 printf("rte_eth_dev_stop: err=%d, port=%u\n",
1344                                        ret, portid);
1345                         rte_eth_dev_close(portid);
1346                         printf(" Done\n");
1347                 }
1348         }
1349         printf("Bye...\n");
1350
1351         return ret;
1352 }