1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2021 Intel Corporation
11 #include <sys/queue.h>
18 #include <rte_common.h>
20 #include <rte_byteorder.h>
22 #include <rte_malloc.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
26 #include <rte_launch.h>
27 #include <rte_cycles.h>
28 #include <rte_prefetch.h>
29 #include <rte_lcore.h>
30 #include <rte_per_lcore.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_interrupts.h>
33 #include <rte_random.h>
34 #include <rte_debug.h>
35 #include <rte_ether.h>
36 #include <rte_mempool.h>
41 #include <rte_string_fns.h>
42 #include <rte_cpuflags.h>
44 #include <cmdline_parse.h>
45 #include <cmdline_parse_etheraddr.h>
48 #include "l3fwd_event.h"
49 #include "l3fwd_route.h"
51 #define MAX_TX_QUEUE_PER_PORT RTE_MAX_LCORE
52 #define MAX_RX_QUEUE_PER_PORT 128
54 #define MAX_LCORE_PARAMS 1024
56 /* Static global variables used within this file. */
57 static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
58 static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
60 /**< Ports set in promiscuous mode off by default. */
61 static int promiscuous_on;
63 /* Select Longest-Prefix, Exact match or Forwarding Information Base. */
64 enum L3FWD_LOOKUP_MODE {
70 static enum L3FWD_LOOKUP_MODE lookup_mode;
72 /* Global variables. */
74 static int numa_on = 1; /**< NUMA is enabled by default. */
75 static int parse_ptype; /**< Parse packet type using rx callback, and */
76 /**< disabled by default */
77 static int per_port_pool; /**< Use separate buffer pools per port; disabled */
80 volatile bool force_quit;
82 /* ethernet addresses of ports */
83 uint64_t dest_eth_addr[RTE_MAX_ETHPORTS];
84 struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
86 xmm_t val_eth[RTE_MAX_ETHPORTS];
88 /* mask of enabled ports */
89 uint32_t enabled_port_mask;
91 /* Used only in exact match mode. */
92 int ipv6; /**< ipv6 is false by default. */
93 uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
95 struct lcore_conf lcore_conf[RTE_MAX_LCORE];
97 struct parm_cfg parm_config;
103 } __rte_cache_aligned;
105 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
106 static struct lcore_params lcore_params_array_default[] = {
118 static struct lcore_params * lcore_params = lcore_params_array_default;
119 static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
120 sizeof(lcore_params_array_default[0]);
122 static struct rte_eth_conf port_conf = {
124 .mq_mode = RTE_ETH_MQ_RX_RSS,
126 .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
131 .rss_hf = RTE_ETH_RSS_IP,
135 .mq_mode = RTE_ETH_MQ_TX_NONE,
139 static uint32_t max_pkt_len;
141 static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
142 static struct rte_mempool *vector_pool[RTE_MAX_ETHPORTS];
143 static uint8_t lkp_per_socket[NB_SOCKETS];
145 struct l3fwd_lkp_mode {
146 void (*read_config_files)(void);
148 int (*check_ptype)(int);
149 rte_rx_callback_fn cb_parse_ptype;
150 int (*main_loop)(void *);
151 void* (*get_ipv4_lookup_struct)(int);
152 void* (*get_ipv6_lookup_struct)(int);
153 void (*free_routes)(void);
156 static struct l3fwd_lkp_mode l3fwd_lkp;
158 static struct l3fwd_lkp_mode l3fwd_em_lkp = {
159 .read_config_files = read_config_files_em,
161 .check_ptype = em_check_ptype,
162 .cb_parse_ptype = em_cb_parse_ptype,
163 .main_loop = em_main_loop,
164 .get_ipv4_lookup_struct = em_get_ipv4_l3fwd_lookup_struct,
165 .get_ipv6_lookup_struct = em_get_ipv6_l3fwd_lookup_struct,
166 .free_routes = em_free_routes,
169 static struct l3fwd_lkp_mode l3fwd_lpm_lkp = {
170 .read_config_files = read_config_files_lpm,
172 .check_ptype = lpm_check_ptype,
173 .cb_parse_ptype = lpm_cb_parse_ptype,
174 .main_loop = lpm_main_loop,
175 .get_ipv4_lookup_struct = lpm_get_ipv4_l3fwd_lookup_struct,
176 .get_ipv6_lookup_struct = lpm_get_ipv6_l3fwd_lookup_struct,
177 .free_routes = lpm_free_routes,
180 static struct l3fwd_lkp_mode l3fwd_fib_lkp = {
181 .read_config_files = read_config_files_lpm,
183 .check_ptype = lpm_check_ptype,
184 .cb_parse_ptype = lpm_cb_parse_ptype,
185 .main_loop = fib_main_loop,
186 .get_ipv4_lookup_struct = fib_get_ipv4_l3fwd_lookup_struct,
187 .get_ipv6_lookup_struct = fib_get_ipv6_l3fwd_lookup_struct,
188 .free_routes = lpm_free_routes,
192 * 198.18.0.0/16 are set aside for RFC2544 benchmarking (RFC5735).
193 * 198.18.{0-15}.0/24 = Port {0-15}
195 const struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {
196 {RTE_IPV4(198, 18, 0, 0), 24, 0},
197 {RTE_IPV4(198, 18, 1, 0), 24, 1},
198 {RTE_IPV4(198, 18, 2, 0), 24, 2},
199 {RTE_IPV4(198, 18, 3, 0), 24, 3},
200 {RTE_IPV4(198, 18, 4, 0), 24, 4},
201 {RTE_IPV4(198, 18, 5, 0), 24, 5},
202 {RTE_IPV4(198, 18, 6, 0), 24, 6},
203 {RTE_IPV4(198, 18, 7, 0), 24, 7},
204 {RTE_IPV4(198, 18, 8, 0), 24, 8},
205 {RTE_IPV4(198, 18, 9, 0), 24, 9},
206 {RTE_IPV4(198, 18, 10, 0), 24, 10},
207 {RTE_IPV4(198, 18, 11, 0), 24, 11},
208 {RTE_IPV4(198, 18, 12, 0), 24, 12},
209 {RTE_IPV4(198, 18, 13, 0), 24, 13},
210 {RTE_IPV4(198, 18, 14, 0), 24, 14},
211 {RTE_IPV4(198, 18, 15, 0), 24, 15},
215 * 2001:200::/48 is IANA reserved range for IPv6 benchmarking (RFC5180).
216 * 2001:200:0:{0-f}::/64 = Port {0-15}
218 const struct ipv6_l3fwd_route ipv6_l3fwd_route_array[] = {
219 {{32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 0},
220 {{32, 1, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 1},
221 {{32, 1, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 2},
222 {{32, 1, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 3},
223 {{32, 1, 2, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 4},
224 {{32, 1, 2, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 5},
225 {{32, 1, 2, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 6},
226 {{32, 1, 2, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 7},
227 {{32, 1, 2, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 8},
228 {{32, 1, 2, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 9},
229 {{32, 1, 2, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 10},
230 {{32, 1, 2, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 11},
231 {{32, 1, 2, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 12},
232 {{32, 1, 2, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 13},
233 {{32, 1, 2, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 14},
234 {{32, 1, 2, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0}, 64, 15},
238 * API's called during initialization to setup ACL/EM/LPM rules.
/*
 * Record the path of the IPv4 rules file given via --rule_ipv4.
 * Note: stores the caller's pointer (getopt's optarg) without copying.
 */
l3fwd_set_rule_ipv4_name(const char *optarg)
	parm_config.rule_ipv4_name = optarg;
/*
 * Record the path of the IPv6 rules file given via --rule_ipv6.
 * Note: stores the caller's pointer (getopt's optarg) without copying.
 */
l3fwd_set_rule_ipv6_name(const char *optarg)
	parm_config.rule_ipv6_name = optarg;
253 * Setup lookup methods for forwarding.
254 * Currently exact-match, longest-prefix-match and forwarding information
255 * base are the supported ones.
setup_l3fwd_lookup_tables(void)
	/* Setup HASH lookup functions. */
	if (lookup_mode == L3FWD_LOOKUP_EM)
		l3fwd_lkp = l3fwd_em_lkp;
	/* Setup FIB lookup functions. */
	else if (lookup_mode == L3FWD_LOOKUP_FIB)
		l3fwd_lkp = l3fwd_fib_lkp;
	/* Setup LPM lookup functions (the default fall-through). */
		l3fwd_lkp = l3fwd_lpm_lkp;
272 check_lcore_params(void)
274 uint8_t queue, lcore;
278 for (i = 0; i < nb_lcore_params; ++i) {
279 queue = lcore_params[i].queue_id;
280 if (queue >= MAX_RX_QUEUE_PER_PORT) {
281 printf("invalid queue number: %hhu\n", queue);
284 lcore = lcore_params[i].lcore_id;
285 if (!rte_lcore_is_enabled(lcore)) {
286 printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
289 if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
291 printf("warning: lcore %hhu is on socket %d with numa off \n",
/*
 * Verify that every port referenced by the lcore/queue config is both
 * enabled in the user port mask and present on the board.
 * Returns 0 on success, -1 on the first bad port.
 */
check_port_config(void)
	for (i = 0; i < nb_lcore_params; ++i) {
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("port %u is not enabled in port mask\n", portid);
		if (!rte_eth_dev_is_valid_port(portid)) {
			printf("port %u is not present on the board\n", portid);
/*
 * Count the RX queues configured for @port.  Queue ids for a port must
 * form the contiguous sequence 0,1,2,...; any gap aborts the program.
 * Returns the number of queues (highest queue id + 1).
 */
get_port_n_rx_queues(const uint16_t port)
	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port) {
			/* accept only the next id in sequence */
			if (lcore_params[i].queue_id == queue+1)
				queue = lcore_params[i].queue_id;
			rte_exit(EXIT_FAILURE, "queue ids of the port %d must be"
				" in sequence and must start with 0\n",
				lcore_params[i].port_id);
	/* queue holds the highest id seen; pre-increment yields the count */
	return (uint8_t)(++queue);
/*
 * Distribute the --config (port,queue,lcore) tuples into the per-lcore
 * rx_queue_list of lcore_conf[], bounding each lcore at
 * MAX_RX_QUEUE_PER_LCORE queues.
 */
init_lcore_rx_queues(void)
	uint16_t i, nb_rx_queue;
	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
				(unsigned)nb_rx_queue + 1, (unsigned)lcore);
		/* append this (port, queue) pair to the lcore's RX list */
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
			lcore_params[i].port_id;
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
			lcore_params[i].queue_id;
		lcore_conf[lcore].n_rx_queue++;
/* Print the full command-line usage/help text to stderr. */
print_usage(const char *prgname)
	fprintf(stderr, "%s [EAL options] --"
		" --config (port,queue,lcore)[,(port,queue,lcore)]"
		" [--eth-dest=X,MM:MM:MM:MM:MM:MM]"
		" [--max-pkt-len PKTLEN]"
		" [--hash-entry-num]"
		" [--event-vector [--event-vector-size SIZE] [--event-vector-tmo NS]]"
		"  -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
		"  -P : Enable promiscuous mode\n"
		"  --lookup: Select the lookup method\n"
		"            Accepted: em (Exact Match), lpm (Longest Prefix Match), fib (Forwarding Information Base)\n"
		"  --config (port,queue,lcore): Rx queue configuration\n"
		"  --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for port X\n"
		"  --max-pkt-len PKTLEN: maximum packet length in decimal (64-9600)\n"
		"  --no-numa: Disable numa awareness\n"
		"  --hash-entry-num: Specify the hash entry number in hexadecimal to be setup\n"
		"  --ipv6: Set if running ipv6 packets\n"
		"  --parse-ptype: Set to use software to analyze packet type\n"
		"  --per-port-pool: Use separate buffer pool per port\n"
		"  --mode: Packet transfer mode for I/O, poll or eventdev\n"
		"          Default mode = poll\n"
		"  --eventq-sched: Event queue synchronization method\n"
		"                  ordered, atomic or parallel.\n"
		"                  Valid only if --mode=eventdev\n"
		"  --event-eth-rxqs: Number of ethernet RX queues per device.\n"
		"                    Valid only if --mode=eventdev\n"
		"  --event-vector:  Enable event vectorization.\n"
		"  --event-vector-size: Max vector size if event vectorization is enabled.\n"
		"  --event-vector-tmo: Max timeout to form vector in nanoseconds if event vectorization is enabled\n"
		"  -E : Enable exact match, legacy flag please use --lookup=em instead\n"
		"  -L : Enable longest prefix match, legacy flag please use --lookup=lpm instead\n"
		"  --rule_ipv4=FILE: Specify the ipv4 rules entries file.\n"
		"                    Each rule occupies one line.\n"
		"  --rule_ipv6=FILE: Specify the ipv6 rules entries file.\n\n",
/*
 * Parse the --max-pkt-len argument as a decimal integer.
 * Rejects empty strings and trailing garbage.
 */
parse_max_pkt_len(const char *pktlen)
	/* parse decimal string */
	len = strtoul(pktlen, &end, 10);
	if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
/*
 * Parse the -p port mask argument as a hexadecimal bitmask.
 * Rejects empty strings and trailing garbage.
 */
parse_portmask(const char *portmask)
	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
/*
 * Parse the --hash-entry-num argument as a hexadecimal integer
 * (exact-match mode only).  Rejects empty strings and trailing garbage.
 */
parse_hash_entry_number(const char *hash_entry_num)
	unsigned long hash_en;
	/* parse hexadecimal string */
	hash_en = strtoul(hash_entry_num, &end, 16);
	if ((hash_entry_num[0] == '\0') || (end == NULL) || (*end != '\0'))
/*
 * Parse the --config "(port,queue,lcore)[,(port,queue,lcore)...]"
 * string into lcore_params_array[] and point the global lcore_params
 * at it.  Returns 0 on success, negative on malformed input or when
 * MAX_LCORE_PARAMS entries are exceeded.
 */
parse_config(const char *q_arg)
	const char *p, *p0 = q_arg;
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	/* iterate over each "(...)" group */
	while ((p = strchr(p0,'(')) != NULL) {
		if((p0 = strchr(p,')')) == NULL)
		if(size >= sizeof(s))
		snprintf(s, sizeof(s), "%.*s", size, p);
		/* split the group into exactly _NUM_FLD comma-separated fields */
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
		for (i = 0; i < _NUM_FLD; i++){
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			/* each field must be a valid number fitting in uint8_t */
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
		lcore_params_array[nb_lcore_params].port_id =
			(uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id =
			(uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id =
			(uint8_t)int_fld[FLD_LCORE];
	/* switch from the built-in default table to the parsed one */
	lcore_params = lcore_params_array;
/*
 * Parse an --eth-dest=PORT,MM:MM:MM:MM:MM:MM argument: store the MAC
 * in dest_eth_addr[PORT] and mirror it into val_eth[PORT] used on the
 * forwarding fast path.  Exits on any malformed input.
 */
parse_eth_dest(const char *optarg)
	uint8_t c, *dest, peer_addr[6];
	portid = strtoul(optarg, &port_end, 10);
	if (errno != 0 || port_end == optarg || *port_end++ != ',')
		rte_exit(EXIT_FAILURE,
			"Invalid eth-dest: %s", optarg);
	if (portid >= RTE_MAX_ETHPORTS)
		rte_exit(EXIT_FAILURE,
			"eth-dest: port %d >= RTE_MAX_ETHPORTS(%d)\n",
			portid, RTE_MAX_ETHPORTS);
	if (cmdline_parse_etheraddr(NULL, port_end,
			&peer_addr, sizeof(peer_addr)) < 0)
		rte_exit(EXIT_FAILURE,
			"Invalid ethernet address: %s\n",
	/* copy the six MAC bytes into the per-port destination slot */
	dest = (uint8_t *)&dest_eth_addr[portid];
	for (c = 0; c < 6; c++)
		dest[c] = peer_addr[c];
	/* keep the fast-path copy (val_eth) in sync */
	*(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
/*
 * Parse the --mode argument: "poll" disables and "eventdev" enables
 * event-device packet I/O.  Any other value leaves the setting as-is.
 */
parse_mode(const char *optarg)
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	if (!strcmp(optarg, "poll"))
		evt_rsrc->enabled = false;
	else if (!strcmp(optarg, "eventdev"))
		evt_rsrc->enabled = true;
/*
 * Parse the --eventq-sched argument into an event queue schedule type.
 * The tests are independent ifs (not else-if); with a single keyword
 * argument only one can match.  Unknown values leave the type as-is.
 */
parse_eventq_sched(const char *optarg)
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	if (!strcmp(optarg, "ordered"))
		evt_rsrc->sched_type = RTE_SCHED_TYPE_ORDERED;
	if (!strcmp(optarg, "atomic"))
		evt_rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
	if (!strcmp(optarg, "parallel"))
		evt_rsrc->sched_type = RTE_SCHED_TYPE_PARALLEL;
/*
 * Parse the --event-eth-rxqs argument (decimal) into the eventdev
 * resource block.  Rejects empty strings, trailing garbage and zero.
 * Note: the value is stored through a uint8_t, so it is truncated to
 * 0-255 before the checks.
 */
parse_event_eth_rx_queues(const char *eth_rx_queues)
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	uint8_t num_eth_rx_queues;
	/* parse decimal string */
	num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10);
	if ((eth_rx_queues[0] == '\0') || (end == NULL) || (*end != '\0'))
	if (num_eth_rx_queues == 0)
	evt_rsrc->eth_rx_queues = num_eth_rx_queues;
/*
 * Parse the --lookup argument into the global lookup_mode.
 * Accepted values: "em", "lpm", "fib"; anything else logs an error
 * (and the caller treats it as failure).
 */
parse_lookup(const char *optarg)
	if (!strcmp(optarg, "em"))
		lookup_mode = L3FWD_LOOKUP_EM;
	else if (!strcmp(optarg, "lpm"))
		lookup_mode = L3FWD_LOOKUP_LPM;
	else if (!strcmp(optarg, "fib"))
		lookup_mode = L3FWD_LOOKUP_FIB;
	fprintf(stderr, "Invalid lookup option! Accepted options: em, lpm, fib\n");
605 #define MAX_JUMBO_PKT_LEN 9600
607 static const char short_options[] =
609 "P" /* promiscuous */
610 "L" /* legacy enable long prefix match */
611 "E" /* legacy enable exact match */
614 #define CMD_LINE_OPT_CONFIG "config"
615 #define CMD_LINE_OPT_ETH_DEST "eth-dest"
616 #define CMD_LINE_OPT_NO_NUMA "no-numa"
617 #define CMD_LINE_OPT_IPV6 "ipv6"
618 #define CMD_LINE_OPT_MAX_PKT_LEN "max-pkt-len"
619 #define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
620 #define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
621 #define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
622 #define CMD_LINE_OPT_MODE "mode"
623 #define CMD_LINE_OPT_EVENTQ_SYNC "eventq-sched"
624 #define CMD_LINE_OPT_EVENT_ETH_RX_QUEUES "event-eth-rxqs"
625 #define CMD_LINE_OPT_LOOKUP "lookup"
626 #define CMD_LINE_OPT_ENABLE_VECTOR "event-vector"
627 #define CMD_LINE_OPT_VECTOR_SIZE "event-vector-size"
628 #define CMD_LINE_OPT_VECTOR_TMO_NS "event-vector-tmo"
629 #define CMD_LINE_OPT_RULE_IPV4 "rule_ipv4"
630 #define CMD_LINE_OPT_RULE_IPV6 "rule_ipv6"
633 /* long options mapped to a short option */
635 /* first long only option value must be >= 256, so that we won't
636 * conflict with short options */
637 CMD_LINE_OPT_MIN_NUM = 256,
638 CMD_LINE_OPT_CONFIG_NUM,
639 CMD_LINE_OPT_ETH_DEST_NUM,
640 CMD_LINE_OPT_NO_NUMA_NUM,
641 CMD_LINE_OPT_IPV6_NUM,
642 CMD_LINE_OPT_MAX_PKT_LEN_NUM,
643 CMD_LINE_OPT_HASH_ENTRY_NUM_NUM,
644 CMD_LINE_OPT_PARSE_PTYPE_NUM,
645 CMD_LINE_OPT_RULE_IPV4_NUM,
646 CMD_LINE_OPT_RULE_IPV6_NUM,
647 CMD_LINE_OPT_PARSE_PER_PORT_POOL,
648 CMD_LINE_OPT_MODE_NUM,
649 CMD_LINE_OPT_EVENTQ_SYNC_NUM,
650 CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM,
651 CMD_LINE_OPT_LOOKUP_NUM,
652 CMD_LINE_OPT_ENABLE_VECTOR_NUM,
653 CMD_LINE_OPT_VECTOR_SIZE_NUM,
654 CMD_LINE_OPT_VECTOR_TMO_NS_NUM
657 static const struct option lgopts[] = {
658 {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
659 {CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
660 {CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
661 {CMD_LINE_OPT_IPV6, 0, 0, CMD_LINE_OPT_IPV6_NUM},
662 {CMD_LINE_OPT_MAX_PKT_LEN, 1, 0, CMD_LINE_OPT_MAX_PKT_LEN_NUM},
663 {CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM},
664 {CMD_LINE_OPT_PARSE_PTYPE, 0, 0, CMD_LINE_OPT_PARSE_PTYPE_NUM},
665 {CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PARSE_PER_PORT_POOL},
666 {CMD_LINE_OPT_MODE, 1, 0, CMD_LINE_OPT_MODE_NUM},
667 {CMD_LINE_OPT_EVENTQ_SYNC, 1, 0, CMD_LINE_OPT_EVENTQ_SYNC_NUM},
668 {CMD_LINE_OPT_EVENT_ETH_RX_QUEUES, 1, 0,
669 CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM},
670 {CMD_LINE_OPT_LOOKUP, 1, 0, CMD_LINE_OPT_LOOKUP_NUM},
671 {CMD_LINE_OPT_ENABLE_VECTOR, 0, 0, CMD_LINE_OPT_ENABLE_VECTOR_NUM},
672 {CMD_LINE_OPT_VECTOR_SIZE, 1, 0, CMD_LINE_OPT_VECTOR_SIZE_NUM},
673 {CMD_LINE_OPT_VECTOR_TMO_NS, 1, 0, CMD_LINE_OPT_VECTOR_TMO_NS_NUM},
674 {CMD_LINE_OPT_RULE_IPV4, 1, 0, CMD_LINE_OPT_RULE_IPV4_NUM},
675 {CMD_LINE_OPT_RULE_IPV6, 1, 0, CMD_LINE_OPT_RULE_IPV6_NUM},
680 * This expression is used to calculate the number of mbufs needed
681 * depending on user input, taking into account memory for rx and
682 * tx hardware rings, cache per lcore and mtable per port per lcore.
683 * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum
686 #define NB_MBUF(nports) RTE_MAX( \
687 (nports*nb_rx_queue*nb_rxd + \
688 nports*nb_lcores*MAX_PKT_BURST + \
689 nports*n_tx_queue*nb_txd + \
690 nb_lcores*MEMPOOL_CACHE_SIZE), \
693 /* Parse the argument given in the command line of the application */
/*
 * Parse the application (non-EAL) command line and fill the global
 * configuration: port mask, lcore/queue config, lookup mode, event
 * mode resources, rule files, etc.  On any invalid option the usage
 * text is printed and an error is returned; getopt state is reset
 * before returning so EAL/app args can be re-scanned.
 */
parse_args(int argc, char **argv)
	char *prgname = argv[0];
	/*
	 * NOTE(review): this local flag shadows the file-scope
	 * 'lcore_params' pointer; here it only records whether --config
	 * was supplied.  Consider renaming to avoid the shadow.
	 */
	uint8_t lcore_params = 0;
	uint8_t eventq_sched = 0;
	uint8_t eth_rx_q = 0;
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	/* Error or normal output strings. */
	while ((opt = getopt_long(argc, argvopt, short_options,
				lgopts, &option_index)) != EOF) {
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				fprintf(stderr, "Invalid portmask\n");
				print_usage(prgname);
			/* -E: legacy exact-match flag */
			if (lookup_mode != L3FWD_LOOKUP_DEFAULT) {
				fprintf(stderr, "Only one lookup mode is allowed at a time!\n");
			lookup_mode = L3FWD_LOOKUP_EM;
			/* -L: legacy longest-prefix-match flag */
			if (lookup_mode != L3FWD_LOOKUP_DEFAULT) {
				fprintf(stderr, "Only one lookup mode is allowed at a time!\n");
			lookup_mode = L3FWD_LOOKUP_LPM;
		case CMD_LINE_OPT_CONFIG_NUM:
			ret = parse_config(optarg);
				fprintf(stderr, "Invalid config\n");
				print_usage(prgname);
		case CMD_LINE_OPT_ETH_DEST_NUM:
			parse_eth_dest(optarg);
		case CMD_LINE_OPT_NO_NUMA_NUM:
		case CMD_LINE_OPT_IPV6_NUM:
		case CMD_LINE_OPT_MAX_PKT_LEN_NUM:
			max_pkt_len = parse_max_pkt_len(optarg);
		case CMD_LINE_OPT_HASH_ENTRY_NUM_NUM:
			ret = parse_hash_entry_number(optarg);
			if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) {
				hash_entry_number = ret;
				fprintf(stderr, "invalid hash entry number\n");
				print_usage(prgname);
		case CMD_LINE_OPT_PARSE_PTYPE_NUM:
			printf("soft parse-ptype is enabled\n");
		case CMD_LINE_OPT_PARSE_PER_PORT_POOL:
			printf("per port buffer pool is enabled\n");
		case CMD_LINE_OPT_MODE_NUM:
		case CMD_LINE_OPT_EVENTQ_SYNC_NUM:
			parse_eventq_sched(optarg);
		case CMD_LINE_OPT_EVENT_ETH_RX_QUEUES_NUM:
			parse_event_eth_rx_queues(optarg);
		case CMD_LINE_OPT_LOOKUP_NUM:
			if (lookup_mode != L3FWD_LOOKUP_DEFAULT) {
				fprintf(stderr, "Only one lookup mode is allowed at a time!\n");
			ret = parse_lookup(optarg);
			 * If parse_lookup was passed an invalid lookup type
			 * then return -1. Error log included within
			 * parse_lookup for simplicity.
		case CMD_LINE_OPT_ENABLE_VECTOR_NUM:
			printf("event vectorization is enabled\n");
			evt_rsrc->vector_enabled = 1;
		case CMD_LINE_OPT_VECTOR_SIZE_NUM:
			evt_rsrc->vector_size = strtol(optarg, NULL, 10);
		case CMD_LINE_OPT_VECTOR_TMO_NS_NUM:
			evt_rsrc->vector_tmo_ns = strtoull(optarg, NULL, 10);
		case CMD_LINE_OPT_RULE_IPV4_NUM:
			l3fwd_set_rule_ipv4_name(optarg);
		case CMD_LINE_OPT_RULE_IPV6_NUM:
			l3fwd_set_rule_ipv6_name(optarg);
			print_usage(prgname);
	/* cross-option sanity checks: poll vs. eventdev exclusivity */
	if (evt_rsrc->enabled && lcore_params) {
		fprintf(stderr, "lcore config is not valid when event mode is selected\n");
	if (!evt_rsrc->enabled && eth_rx_q) {
		fprintf(stderr, "eth_rx_queues is valid only when event mode is selected\n");
	if (!evt_rsrc->enabled && eventq_sched) {
		fprintf(stderr, "eventq_sched is valid only when event mode is selected\n");
	/* fill in defaults for event vectorization when left unset */
	if (evt_rsrc->vector_enabled && !evt_rsrc->vector_size) {
		evt_rsrc->vector_size = VECTOR_SIZE_DEFAULT;
		fprintf(stderr, "vector size set to default (%" PRIu16 ")\n",
			evt_rsrc->vector_size);
	if (evt_rsrc->vector_enabled && !evt_rsrc->vector_tmo_ns) {
		evt_rsrc->vector_tmo_ns = VECTOR_TMO_NS_DEFAULT;
			"vector timeout set to default (%" PRIu64 " ns)\n",
			evt_rsrc->vector_tmo_ns);
	 * Nothing is selected, pick longest-prefix match
	if (lookup_mode == L3FWD_LOOKUP_DEFAULT) {
		fprintf(stderr, "Neither LPM, EM, or FIB selected, defaulting to LPM\n");
		lookup_mode = L3FWD_LOOKUP_LPM;
	 * ipv6 and hash flags are valid only for
	 * exact match, reset them to default for
	 * longest-prefix match.
	if (lookup_mode == L3FWD_LOOKUP_LPM) {
		hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
	argv[optind-1] = prgname;
	optind = 1; /* reset getopt lib */
/* Print @name followed by the formatted Ethernet address (no newline). */
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
/*
 * Allocate per-socket mbuf pools (and, in event-vector mode, per-port
 * event vector pools) and bind each enabled lcore's lookup structs.
 * @portid is a real port when --per-port-pool is set; otherwise the
 * caller passes 0 and the id is ignored (shared pool).  Exits on any
 * allocation failure.
 */
init_mem(uint16_t portid, unsigned int nb_mbuf)
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	struct lcore_conf *qconf;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
		/* with --no-numa everything lands on socket 0 (elided here) */
		socketid = rte_lcore_to_socket_id(lcore_id);
		if (socketid >= NB_SOCKETS) {
			rte_exit(EXIT_FAILURE,
				"Socket %d of lcore %u is out of range %d\n",
				socketid, lcore_id, NB_SOCKETS);
		/* create the mbuf pool once per (port, socket) pair */
		if (pktmbuf_pool[portid][socketid] == NULL) {
			snprintf(s, sizeof(s), "mbuf_pool_%d:%d",
			pktmbuf_pool[portid][socketid] =
				rte_pktmbuf_pool_create(s, nb_mbuf,
					MEMPOOL_CACHE_SIZE, 0,
					RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
			if (pktmbuf_pool[portid][socketid] == NULL)
				rte_exit(EXIT_FAILURE,
					"Cannot init mbuf pool on socket %d\n",
			printf("Allocated mbuf pool on socket %d\n",
			/* Setup LPM, EM(f.e Hash) or FIB. But, only once per
			 */
			if (!lkp_per_socket[socketid]) {
				l3fwd_lkp.setup(socketid);
				lkp_per_socket[socketid] = 1;
		/* event-vector mode: one vector pool per port */
		if (evt_rsrc->vector_enabled && vector_pool[portid] == NULL) {
			nb_vec = (nb_mbuf + evt_rsrc->vector_size - 1) /
				evt_rsrc->vector_size;
			snprintf(s, sizeof(s), "vector_pool_%d", portid);
			vector_pool[portid] = rte_event_vector_pool_create(
				s, nb_vec, 0, evt_rsrc->vector_size, socketid);
			if (vector_pool[portid] == NULL)
				rte_exit(EXIT_FAILURE,
					"Failed to create vector pool for port %d\n",
			printf("Allocated vector pool for port %d\n",
		/* point this lcore at its socket-local lookup structures */
		qconf = &lcore_conf[lcore_id];
		qconf->ipv4_lookup_struct =
			l3fwd_lkp.get_ipv4_lookup_struct(socketid);
		qconf->ipv6_lookup_struct =
			l3fwd_lkp.get_ipv6_lookup_struct(socketid);
979 /* Check the link status of all ports in up to 9s, and print them finally */
/*
 * Poll link state of every port in @port_mask, printing a dot per
 * round; prints the final per-port status once all links are up or
 * the ~9 s timeout expires.
 */
check_all_ports_link_status(uint32_t port_mask)
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
	printf("\nChecking link status");
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		RTE_ETH_FOREACH_DEV(portid) {
			/* skip ports outside the configured mask */
			if ((port_mask & (1 << portid)) == 0)
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
			/* print link status if flag set */
			if (print_flag == 1) {
				rte_eth_link_to_str(link_status_text,
					sizeof(link_status_text), &link);
				printf("Port %d %s\n", portid,
			/* clear all_ports_up flag if any link down */
			if (link.link_status == RTE_ETH_LINK_DOWN) {
		/* after finally printing all link status, get out */
		if (print_flag == 1)
		if (all_ports_up == 0) {
			rte_delay_ms(CHECK_INTERVAL);
		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * SIGINT/SIGTERM handler: announce shutdown.  The (elided) body sets
 * the volatile force_quit flag polled by the worker loops.
 */
signal_handler(int signum)
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\n\nSignal %d received, preparing to exit...\n",
/*
 * Arrange packet-type classification for one RX queue: install the
 * software parse callback when --parse-ptype is set, otherwise verify
 * the hardware can report the ptypes the lookup mode needs.
 */
prepare_ptype_parser(uint16_t portid, uint16_t queueid)
		printf("Port %d: softly parse packet type info\n", portid);
		if (rte_eth_add_rx_callback(portid, queueid,
					    l3fwd_lkp.cb_parse_ptype,
		printf("Failed to add rx callback: port=%d\n", portid);
	/* no software parsing requested: rely on NIC ptype support */
	if (l3fwd_lkp.check_ptype(portid))
	printf("port %d cannot parse packet type, please add --%s\n",
	       portid, CMD_LINE_OPT_PARSE_PTYPE);
/*
 * Derive the per-frame L2 overhead for a device.  When the driver
 * reports a usable max MTU, the overhead is the gap between the max
 * RX frame length and that MTU; otherwise fall back to plain Ethernet
 * header + CRC.
 */
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
	uint32_t overhead_len;
	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	return overhead_len;
/*
 * Apply the user --max-pkt-len to a port configuration: translate the
 * frame length into an MTU by subtracting the device's L2 overhead,
 * and enable multi-segment TX when the resulting MTU exceeds the
 * standard Ethernet MTU.  Returns non-zero when max_pkt_len is out of
 * the accepted [RTE_ETHER_MIN_LEN, MAX_JUMBO_PKT_LEN] range.
 */
config_port_max_pkt_len(struct rte_eth_conf *conf,
		struct rte_eth_dev_info *dev_info)
	uint32_t overhead_len;
	/* 0 means "not set on the command line": keep driver defaults */
	if (max_pkt_len == 0)
	if (max_pkt_len < RTE_ETHER_MIN_LEN || max_pkt_len > MAX_JUMBO_PKT_LEN)
	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
	conf->rxmode.mtu = max_pkt_len - overhead_len;
	if (conf->rxmode.mtu > RTE_ETHER_MTU)
		conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
/*
 * Full poll-mode initialization: validate the lcore/port config,
 * configure every enabled port (RSS, MTU, offloads), allocate mbuf
 * pools, and set up one TX queue per (lcore, port) plus the per-lcore
 * RX queues from --config.  Exits on any setup failure.
 */
l3fwd_poll_resource_setup(void)
	uint8_t nb_rx_queue, queue, socketid;
	struct rte_eth_dev_info dev_info;
	uint32_t n_tx_queue, nb_lcores;
	struct rte_eth_txconf *txconf;
	struct lcore_conf *qconf;
	uint16_t queueid, portid;
	unsigned int nb_ports;
	unsigned int lcore_id;
	if (check_lcore_params() < 0)
		rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
	ret = init_lcore_rx_queues();
		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
	nb_ports = rte_eth_dev_count_avail();
	if (check_port_config() < 0)
		rte_exit(EXIT_FAILURE, "check_port_config failed\n");
	nb_lcores = rte_lcore_count();
	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		struct rte_eth_conf local_port_conf = port_conf;
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
		printf("Initializing port %d ... ", portid );
		/* one TX queue per lcore, capped at the per-port maximum */
		nb_rx_queue = get_port_n_rx_queues(portid);
		n_tx_queue = nb_lcores;
		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
			nb_rx_queue, (unsigned)n_tx_queue );
		ret = rte_eth_dev_info_get(portid, &dev_info);
			rte_exit(EXIT_FAILURE,
				"Error during getting device (port %u) info: %s\n",
				portid, strerror(-ret));
		ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
			rte_exit(EXIT_FAILURE,
				"Invalid max packet length: %u (port %u)\n",
				max_pkt_len, portid);
		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
		/* trim requested RSS hash to what the NIC supports */
		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;
		if (dev_info.max_rx_queues == 1)
			local_port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			printf("Port %u modified RSS hash function based on hardware support,"
				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
				port_conf.rx_adv_conf.rss_conf.rss_hf,
				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		ret = rte_eth_dev_configure(portid, nb_rx_queue,
					(uint16_t)n_tx_queue, &local_port_conf);
			rte_exit(EXIT_FAILURE,
				"Cannot configure device: err=%d, port=%d\n",
		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
			rte_exit(EXIT_FAILURE,
				"Cannot adjust number of descriptors: err=%d, "
				"port=%d\n", ret, portid);
		ret = rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
			rte_exit(EXIT_FAILURE,
				"Cannot get MAC address: err=%d, port=%d\n",
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		print_ethaddr("Destination:",
			(const struct rte_ether_addr *)&dest_eth_addr[portid]);
		 * prepare src MACs for each port.
		rte_ether_addr_copy(&ports_eth_addr[portid],
			(struct rte_ether_addr *)(val_eth + portid) + 1);
		if (!per_port_pool) {
			/* portid = 0; this is *not* signifying the first port,
			 * rather, it signifies that portid is ignored.
			 */
			ret = init_mem(0, NB_MBUF(nb_ports));
			ret = init_mem(portid, NB_MBUF(1));
			rte_exit(EXIT_FAILURE, "init_mem failed\n");
		/* init one TX queue per couple (lcore,port) */
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				(uint8_t)rte_lcore_to_socket_id(lcore_id);
			printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
			txconf = &dev_info.default_txconf;
			txconf->offloads = local_port_conf.txmode.offloads;
			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
				rte_exit(EXIT_FAILURE,
					"rte_eth_tx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);
			/* remember which TX queue this lcore owns on this port */
			qconf = &lcore_conf[lcore_id];
			qconf->tx_queue_id[portid] = queueid;
			qconf->tx_port_id[qconf->n_tx_port] = portid;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
		qconf = &lcore_conf[lcore_id];
		printf("\nInitializing rx queues on lcore %u ... ", lcore_id );
		/* init RX queues */
		for(queue = 0; queue < qconf->n_rx_queue; ++queue) {
			struct rte_eth_rxconf rxq_conf;
			portid = qconf->rx_queue_list[queue].port_id;
			queueid = qconf->rx_queue_list[queue].queue_id;
				(uint8_t)rte_lcore_to_socket_id(lcore_id);
			printf("rxq=%d,%d,%d ", portid, queueid, socketid);
			ret = rte_eth_dev_info_get(portid, &dev_info);
				rte_exit(EXIT_FAILURE,
					"Error during getting device (port %u) info: %s\n",
					portid, strerror(-ret));
			rxq_conf = dev_info.default_rxconf;
			rxq_conf.offloads = port_conf.rxmode.offloads;
			/* shared pool uses index 0; per-port pool uses portid */
			ret = rte_eth_rx_queue_setup(portid, queueid,
					pktmbuf_pool[0][socketid]);
			ret = rte_eth_rx_queue_setup(portid, queueid,
					pktmbuf_pool[portid][socketid]);
				rte_exit(EXIT_FAILURE,
				"rte_eth_rx_queue_setup: err=%d, port=%d\n",
/*
 * Map @service_id onto the service lcore currently running the fewest
 * services, then start that lcore.  Existing default mappings for the
 * service are cleared first.  No-op when no service lcores exist.
 */
l3fwd_service_enable(uint32_t service_id)
	uint8_t min_service_count = UINT8_MAX;
	uint32_t slcore_array[RTE_MAX_LCORE];
	unsigned int slcore = 0;
	uint8_t service_count;
	int32_t slcore_count;
	if (!rte_service_lcore_count())
	slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE);
	if (slcore_count < 0)
	/* Get the core which has least number of services running. */
	while (slcore_count--) {
		/* Reset default mapping */
		if (rte_service_map_lcore_set(service_id,
				slcore_array[slcore_count], 0) != 0)
		service_count = rte_service_lcore_count_services(
				slcore_array[slcore_count]);
		if (service_count < min_service_count) {
			slcore = slcore_array[slcore_count];
			min_service_count = service_count;
	if (rte_service_map_lcore_set(service_id, slcore, 1))
	rte_service_lcore_start(slcore);
1351 l3fwd_event_service_setup(void)
1353 struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
1354 struct rte_event_dev_info evdev_info;
1355 uint32_t service_id, caps;
1358 rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
1359 if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
1360 ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
1362 if (ret != -ESRCH && ret != 0)
1363 rte_exit(EXIT_FAILURE,
1364 "Error in starting eventdev service\n");
1365 l3fwd_service_enable(service_id);
1368 for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
1369 ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
1370 evt_rsrc->rx_adptr.rx_adptr[i], &caps);
1372 rte_exit(EXIT_FAILURE,
1373 "Failed to get Rx adapter[%d] caps\n",
1374 evt_rsrc->rx_adptr.rx_adptr[i]);
1375 ret = rte_event_eth_rx_adapter_service_id_get(
1376 evt_rsrc->event_d_id,
1378 if (ret != -ESRCH && ret != 0)
1379 rte_exit(EXIT_FAILURE,
1380 "Error in starting Rx adapter[%d] service\n",
1381 evt_rsrc->rx_adptr.rx_adptr[i]);
1382 l3fwd_service_enable(service_id);
1385 for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
1386 ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
1387 evt_rsrc->tx_adptr.tx_adptr[i], &caps);
1389 rte_exit(EXIT_FAILURE,
1390 "Failed to get Rx adapter[%d] caps\n",
1391 evt_rsrc->tx_adptr.tx_adptr[i]);
1392 ret = rte_event_eth_tx_adapter_service_id_get(
1393 evt_rsrc->event_d_id,
1395 if (ret != -ESRCH && ret != 0)
1396 rte_exit(EXIT_FAILURE,
1397 "Error in starting Rx adapter[%d] service\n",
1398 evt_rsrc->tx_adptr.tx_adptr[i]);
1399 l3fwd_service_enable(service_id);
1404 main(int argc, char **argv)
1406 struct l3fwd_event_resources *evt_rsrc;
1407 struct lcore_conf *qconf;
1408 uint16_t queueid, portid;
1409 unsigned int lcore_id;
1414 ret = rte_eal_init(argc, argv);
1416 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
1421 signal(SIGINT, signal_handler);
1422 signal(SIGTERM, signal_handler);
1424 /* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
1425 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
1426 dest_eth_addr[portid] =
1427 RTE_ETHER_LOCAL_ADMIN_ADDR + ((uint64_t)portid << 40);
1428 *(uint64_t *)(val_eth + portid) = dest_eth_addr[portid];
1431 evt_rsrc = l3fwd_get_eventdev_rsrc();
1432 /* parse application arguments (after the EAL ones) */
1433 ret = parse_args(argc, argv);
1435 rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
1437 /* Setup function pointers for lookup method. */
1438 setup_l3fwd_lookup_tables();
1440 /* Add the config file rules */
1441 l3fwd_lkp.read_config_files();
1443 evt_rsrc->per_port_pool = per_port_pool;
1444 evt_rsrc->pkt_pool = pktmbuf_pool;
1445 evt_rsrc->vec_pool = vector_pool;
1446 evt_rsrc->port_mask = enabled_port_mask;
1447 /* Configure eventdev parameters if user has requested */
1448 if (evt_rsrc->enabled) {
1449 l3fwd_event_resource_setup(&port_conf);
1450 if (lookup_mode == L3FWD_LOOKUP_EM)
1451 l3fwd_lkp.main_loop = evt_rsrc->ops.em_event_loop;
1452 else if (lookup_mode == L3FWD_LOOKUP_FIB)
1453 l3fwd_lkp.main_loop = evt_rsrc->ops.fib_event_loop;
1455 l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop;
1456 l3fwd_event_service_setup();
1458 l3fwd_poll_resource_setup();
1461 RTE_ETH_FOREACH_DEV(portid) {
1462 if ((enabled_port_mask & (1 << portid)) == 0) {
1466 ret = rte_eth_dev_start(portid);
1468 rte_exit(EXIT_FAILURE,
1469 "rte_eth_dev_start: err=%d, port=%d\n",
1473 * If enabled, put device in promiscuous mode.
1474 * This allows IO forwarding mode to forward packets
1475 * to itself through 2 cross-connected ports of the
1478 if (promiscuous_on) {
1479 ret = rte_eth_promiscuous_enable(portid);
1481 rte_exit(EXIT_FAILURE,
1482 "rte_eth_promiscuous_enable: err=%s, port=%u\n",
1483 rte_strerror(-ret), portid);
1489 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1490 if (rte_lcore_is_enabled(lcore_id) == 0)
1492 qconf = &lcore_conf[lcore_id];
1493 for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
1494 portid = qconf->rx_queue_list[queue].port_id;
1495 queueid = qconf->rx_queue_list[queue].queue_id;
1496 if (prepare_ptype_parser(portid, queueid) == 0)
1497 rte_exit(EXIT_FAILURE, "ptype check fails\n");
1501 check_all_ports_link_status(enabled_port_mask);
1504 /* launch per-lcore init on every lcore */
1505 rte_eal_mp_remote_launch(l3fwd_lkp.main_loop, NULL, CALL_MAIN);
1506 if (evt_rsrc->enabled) {
1507 for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++)
1508 rte_event_eth_rx_adapter_stop(
1509 evt_rsrc->rx_adptr.rx_adptr[i]);
1510 for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++)
1511 rte_event_eth_tx_adapter_stop(
1512 evt_rsrc->tx_adptr.tx_adptr[i]);
1514 RTE_ETH_FOREACH_DEV(portid) {
1515 if ((enabled_port_mask & (1 << portid)) == 0)
1517 ret = rte_eth_dev_stop(portid);
1519 printf("rte_eth_dev_stop: err=%d, port=%u\n",
1523 rte_eal_mp_wait_lcore();
1524 RTE_ETH_FOREACH_DEV(portid) {
1525 if ((enabled_port_mask & (1 << portid)) == 0)
1527 rte_eth_dev_close(portid);
1530 rte_event_dev_stop(evt_rsrc->event_d_id);
1531 rte_event_dev_close(evt_rsrc->event_d_id);
1534 rte_eal_mp_wait_lcore();
1536 RTE_ETH_FOREACH_DEV(portid) {
1537 if ((enabled_port_mask & (1 << portid)) == 0)
1539 printf("Closing port %d...", portid);
1540 ret = rte_eth_dev_stop(portid);
1542 printf("rte_eth_dev_stop: err=%d, port=%u\n",
1544 rte_eth_dev_close(portid);
1549 /* clean up config file routes */
1550 l3fwd_lkp.free_routes();
1552 /* clean up the EAL */