1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
10 #include <sys/queue.h>
15 #include <netinet/in.h>
17 #include <linux/if_tun.h>
19 #include <sys/ioctl.h>
23 #include <rte_common.h>
25 #include <rte_memory.h>
26 #include <rte_memcpy.h>
28 #include <rte_per_lcore.h>
29 #include <rte_launch.h>
30 #include <rte_atomic.h>
31 #include <rte_lcore.h>
32 #include <rte_branch_prediction.h>
33 #include <rte_interrupts.h>
34 #include <rte_bus_pci.h>
35 #include <rte_debug.h>
36 #include <rte_ether.h>
37 #include <rte_ethdev.h>
38 #include <rte_mempool.h>
40 #include <rte_string_fns.h>
41 #include <rte_cycles.h>
42 #include <rte_malloc.h>
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1

/* Max size of a single packet */
#define MAX_PACKET_SZ 2048

/* Size of the data buffer in each mbuf (payload + headroom) */
#define MBUF_DATA_SZ (MAX_PACKET_SZ + RTE_PKTMBUF_HEADROOM)

/* Number of mbufs in mempool that is created */
#define NB_MBUF (8192 * 16)

/* How many packets to attempt to read from NIC in one go */
#define PKT_BURST_SZ 32

/* How many objects (mbufs) to keep in per-lcore mempool cache */
#define MEMPOOL_CACHE_SZ PKT_BURST_SZ

/* Number of RX ring descriptors */

/* Number of TX ring descriptors */

/* Total octets in ethernet header */
#define KNI_ENET_HEADER_SIZE 14

/* Total octets in the FCS (frame check sequence) */
#define KNI_ENET_FCS_SIZE 4

/* Time-unit constants (microseconds per second, seconds per day) */
#define KNI_US_PER_SECOND 1000000
#define KNI_SECOND_PER_DAY 86400

/* Maximum number of kernel threads (and thus KNI devices) per port */
#define KNI_MAX_KTHREAD 32
/*
 * Structure of port parameters: one instance per enabled port,
 * filled in by parse_config() from the --config option.
 */
struct kni_port_params {
	uint16_t port_id;/* Port ID */
	unsigned lcore_rx; /* lcore ID for RX */
	unsigned lcore_tx; /* lcore ID for TX */
	uint32_t nb_lcore_k; /* Number of lcores for KNI multi kernel threads */
	uint32_t nb_kni; /* Number of KNI devices to be created */
	unsigned lcore_k[KNI_MAX_KTHREAD]; /* lcore ID list for kthreads */
	struct rte_kni *kni[KNI_MAX_KTHREAD]; /* KNI context pointers */
} __rte_cache_aligned;
/* Per-port configuration, indexed by port id; NULL for unconfigured ports. */
static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];
/* Options for configuring ethernet port */
static struct rte_eth_conf port_conf = {
	/* NOTE(review): the .rxmode = { ... } and .txmode = { ... } wrappers
	 * are not visible in this chunk; the fields below belong to those
	 * sub-structures — confirm against the full file. */
	.header_split = 0, /* Header Split disabled */
	.hw_ip_checksum = 0, /* IP checksum offload disabled */
	.hw_vlan_filter = 0, /* VLAN filtering disabled */
	.jumbo_frame = 0, /* Jumbo Frame Support disabled */
	.hw_strip_crc = 1, /* CRC stripped by hardware */
	.mq_mode = ETH_MQ_TX_NONE,
/* Mempool for mbufs, shared by all ports and KNI devices */
static struct rte_mempool * pktmbuf_pool = NULL;

/* Mask of enabled ports (hex bitmask from the -p option) */
static uint32_t ports_mask = 0;
/* Ports set in promiscuous mode off by default (-P enables it). */
static int promiscuous_on = 0;
/* Structure type for recording kni interface specific stats */
struct kni_interface_stats {
	/* number of pkts received from NIC, and sent to KNI */
	/* number of pkts received from NIC, but failed to send to KNI */
	/* number of pkts received from KNI, and sent to NIC */
	/* number of pkts received from KNI, but failed to send to NIC */
	/* NOTE(review): the counter declarations (rx_packets, rx_dropped,
	 * tx_packets, tx_dropped — referenced elsewhere in this file) are
	 * elided in this chunk. */

/* kni device statistics array, indexed by port id */
static struct kni_interface_stats kni_stats[RTE_MAX_ETHPORTS];
/* KNI control callbacks, registered with the KNI device in kni_alloc() */
static int kni_change_mtu(uint16_t port_id, unsigned int new_mtu);
static int kni_config_network_interface(uint16_t port_id, uint8_t if_up);

/* Raised (incremented) by signal_handler to make the RX/TX loops exit */
static rte_atomic32_t kni_stop = RTE_ATOMIC32_INIT(0);
/* Print out statistics on packets handled */
/* NOTE(review): the function header and parts of the loop body are elided
 * in this chunk; only the printing statements are visible. */
	printf("\n**KNI example application statistics**\n"
	"====== ============== ============ ============ ============ ============\n"
	" Port Lcore(RX/TX) rx_packets rx_dropped tx_packets tx_dropped\n"
	"------ -------------- ------------ ------------ ------------ ------------\n");
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Skip ports not configured via --config */
		if (!kni_port_params_array[i])
		printf("%7d %10u/%2u %13"PRIu64" %13"PRIu64" %13"PRIu64" "
			kni_port_params_array[i]->lcore_rx,
			kni_port_params_array[i]->lcore_tx,
			kni_stats[i].rx_packets,
			kni_stats[i].rx_dropped,
			kni_stats[i].tx_packets,
			kni_stats[i].tx_dropped);
	printf("====== ============== ============ ============ ============ ============\n");
/* Custom handling of signals to handle stats and kni processing */
signal_handler(int signum)
	/* When we receive a USR1 signal, print stats */
	if (signum == SIGUSR1) {

	/* When we receive a USR2 signal, reset stats */
	if (signum == SIGUSR2) {
		memset(&kni_stats, 0, sizeof(kni_stats));
		printf("\n**Statistics have been reset**\n");

	/* When we receive a RTMIN or SIGINT signal, stop kni processing */
	if (signum == SIGRTMIN || signum == SIGINT){
		printf("SIGRTMIN is received, and the KNI processing is "
		/* Signal the lcore loops in main_loop() to terminate */
		rte_atomic32_inc(&kni_stop);
/* Free a burst of mbufs that could not be transmitted. */
kni_burst_free_mbufs(struct rte_mbuf **pkts, unsigned num)
	for (i = 0; i < num; i++) {
		rte_pktmbuf_free(pkts[i]);
/*
 * Interface to burst rx and enqueue mbufs into rx_q:
 * reads packets from the NIC and pushes them to the port's KNI device(s).
 */
kni_ingress(struct kni_port_params *p)
	struct rte_mbuf *pkts_burst[PKT_BURST_SZ];

	port_id = p->port_id;
	for (i = 0; i < nb_kni; i++) {
		/* Burst rx from eth */
		nb_rx = rte_eth_rx_burst(port_id, 0, pkts_burst, PKT_BURST_SZ);
		if (unlikely(nb_rx > PKT_BURST_SZ)) {
			RTE_LOG(ERR, APP, "Error receiving from eth\n");

		/* Burst tx to kni */
		num = rte_kni_tx_burst(p->kni[i], pkts_burst, nb_rx);
		kni_stats[port_id].rx_packets += num;
		/* Service any pending kernel requests (MTU change, if up/down) */
		rte_kni_handle_request(p->kni[i]);
		if (unlikely(num < nb_rx)) {
			/* Free mbufs not tx to kni interface */
			kni_burst_free_mbufs(&pkts_burst[num], nb_rx - num);
			kni_stats[port_id].rx_dropped += nb_rx - num;
/*
 * Interface to dequeue mbufs from tx_q and burst tx:
 * reads packets from the port's KNI device(s) and sends them to the NIC.
 */
kni_egress(struct kni_port_params *p)
	struct rte_mbuf *pkts_burst[PKT_BURST_SZ];

	port_id = p->port_id;
	for (i = 0; i < nb_kni; i++) {
		/* Burst rx from kni */
		num = rte_kni_rx_burst(p->kni[i], pkts_burst, PKT_BURST_SZ);
		if (unlikely(num > PKT_BURST_SZ)) {
			RTE_LOG(ERR, APP, "Error receiving from KNI\n");

		/* Burst tx to eth */
		nb_tx = rte_eth_tx_burst(port_id, 0, pkts_burst, (uint16_t)num);
		kni_stats[port_id].tx_packets += nb_tx;
		if (unlikely(nb_tx < num)) {
			/* Free mbufs not tx to NIC */
			kni_burst_free_mbufs(&pkts_burst[nb_tx], num - nb_tx);
			kni_stats[port_id].tx_dropped += num - nb_tx;
/*
 * Per-lcore worker entry point: each lcore either reads from a port
 * (kni_ingress) or writes to it (kni_egress), according to --config.
 */
main_loop(__rte_unused void *arg)
	uint8_t i, nb_ports = rte_eth_dev_count();
	const unsigned lcore_id = rte_lcore_id();
	enum lcore_rxtx flag = LCORE_NONE;

	/* Find the port (if any) this lcore serves, and in which role */
	for (i = 0; i < nb_ports; i++) {
		if (!kni_port_params_array[i])
		if (kni_port_params_array[i]->lcore_rx == (uint8_t)lcore_id) {
		} else if (kni_port_params_array[i]->lcore_tx ==

	if (flag == LCORE_RX) {
		RTE_LOG(INFO, APP, "Lcore %u is reading from port %d\n",
			kni_port_params_array[i]->lcore_rx,
			kni_port_params_array[i]->port_id);
		/* Loop until kni_stop is raised by signal_handler() */
		f_stop = rte_atomic32_read(&kni_stop);
		kni_ingress(kni_port_params_array[i]);
	} else if (flag == LCORE_TX) {
		RTE_LOG(INFO, APP, "Lcore %u is writing to port %d\n",
			kni_port_params_array[i]->lcore_tx,
			kni_port_params_array[i]->port_id);
		f_stop = rte_atomic32_read(&kni_stop);
		kni_egress(kni_port_params_array[i]);
	/* Lcore is in neither RX nor TX role for any configured port */
	RTE_LOG(INFO, APP, "Lcore %u has nothing to do\n", lcore_id);
/* Display usage instructions */
print_usage(const char *prgname)
	RTE_LOG(INFO, APP, "\nUsage: %s [EAL options] -- -p PORTMASK -P "
		"[--config (port,lcore_rx,lcore_tx,lcore_kthread...)"
		"[,(port,lcore_rx,lcore_tx,lcore_kthread...)]]\n"
		" -p PORTMASK: hex bitmask of ports to use\n"
		" -P : enable promiscuous mode\n"
		" --config (port,lcore_rx,lcore_tx,lcore_kthread...): "
		"port and lcore configurations\n",
/*
 * Convert a hex string to an unsigned 32-bit number.
 * 0 is returned if any error occurs (empty string, trailing garbage,
 * or value out of uint32_t range).
 */
static uint32_t
parse_unsigned(const char *portmask)
{
	char *end = NULL;
	unsigned long num;

	/* Reset errno so a stale ERANGE from an earlier call is not seen */
	errno = 0;
	num = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') ||
	    (errno == ERANGE) || (num > UINT32_MAX))
		return 0;

	return (uint32_t)num;
}
/* Dump the parsed per-port configuration at DEBUG log level. */
	struct kni_port_params **p = kni_port_params_array;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		RTE_LOG(DEBUG, APP, "Port ID: %d\n", p[i]->port_id);
		RTE_LOG(DEBUG, APP, "Rx lcore ID: %u, Tx lcore ID: %u\n",
			p[i]->lcore_rx, p[i]->lcore_tx);
		for (j = 0; j < p[i]->nb_lcore_k; j++)
			RTE_LOG(DEBUG, APP, "Kernel thread lcore ID: %u\n",
/*
 * Parse the --config option: a list of
 * (port,lcore_rx,lcore_tx[,lcore_kthread...]) tuples, one per port.
 * Fills kni_port_params_array; on error frees everything allocated so far.
 */
parse_config(const char *arg)
	const char *p, *p0 = arg;
	_NUM_FLD = KNI_MAX_KTHREAD + 3,
	char *str_fld[_NUM_FLD];
	unsigned long int_fld[_NUM_FLD];
	uint16_t port_id, nb_kni_port_params = 0;

	memset(&kni_port_params_array, 0, sizeof(kni_port_params_array));
	/* Walk each "(...)" group in the argument string */
	while (((p = strchr(p0, '(')) != NULL) &&
		nb_kni_port_params < RTE_MAX_ETHPORTS) {
		if ((p0 = strchr(p, ')')) == NULL)
		if (size >= sizeof(s)) {
			printf("Invalid config parameters\n");
		snprintf(s, sizeof(s), "%.*s", size, p);
		/* Split the tuple into comma-separated fields */
		nb_token = rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',');
		if (nb_token <= FLD_LCORE_TX) {
			printf("Invalid config parameters\n");
		for (i = 0; i < nb_token; i++) {
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i]) {
				printf("Invalid config parameters\n");
		port_id = int_fld[i++];
		if (port_id >= RTE_MAX_ETHPORTS) {
			printf("Port ID %d could not exceed the maximum %d\n",
				port_id, RTE_MAX_ETHPORTS);
		/* Reject duplicate tuples for the same port */
		if (kni_port_params_array[port_id]) {
			printf("Port %d has been configured\n", port_id);
		/* NOTE(review): rte_zmalloc() return value is not checked
		 * here — a NULL on allocation failure would be dereferenced
		 * just below. Worth fixing against the full file. */
		kni_port_params_array[port_id] =
			rte_zmalloc("KNI_port_params",
			sizeof(struct kni_port_params), RTE_CACHE_LINE_SIZE);
		kni_port_params_array[port_id]->port_id = port_id;
		kni_port_params_array[port_id]->lcore_rx =
			(uint8_t)int_fld[i++];
		kni_port_params_array[port_id]->lcore_tx =
			(uint8_t)int_fld[i++];
		if (kni_port_params_array[port_id]->lcore_rx >= RTE_MAX_LCORE ||
			kni_port_params_array[port_id]->lcore_tx >= RTE_MAX_LCORE) {
			printf("lcore_rx %u or lcore_tx %u ID could not "
				"exceed the maximum %u\n",
				kni_port_params_array[port_id]->lcore_rx,
				kni_port_params_array[port_id]->lcore_tx,
				(unsigned)RTE_MAX_LCORE);
		/* Remaining fields are optional kernel-thread lcore ids */
		for (j = 0; i < nb_token && j < KNI_MAX_KTHREAD; i++, j++)
			kni_port_params_array[port_id]->lcore_k[j] =
		kni_port_params_array[port_id]->nb_lcore_k = j;

	/* Error path: release everything allocated so far */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (kni_port_params_array[i]) {
			rte_free(kni_port_params_array[i]);
			kni_port_params_array[i] = NULL;
/*
 * Sanity-check the port mask against the --config tuples and verify
 * that every referenced lcore is enabled in the EAL coremask.
 * Exits the application on any inconsistency.
 */
validate_parameters(uint32_t portmask)
		printf("No port configured in port mask\n");

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Every masked port must have a config tuple and vice versa */
		if (((portmask & (1 << i)) && !kni_port_params_array[i]) ||
			(!(portmask & (1 << i)) && kni_port_params_array[i]))
			rte_exit(EXIT_FAILURE, "portmask is not consistent "
				"to port ids specified in --config\n");

		if (kni_port_params_array[i] && !rte_lcore_is_enabled(\
			(unsigned)(kni_port_params_array[i]->lcore_rx)))
			rte_exit(EXIT_FAILURE, "lcore id %u for "
				"port %d receiving not enabled\n",
				kni_port_params_array[i]->lcore_rx,
				kni_port_params_array[i]->port_id);

		if (kni_port_params_array[i] && !rte_lcore_is_enabled(\
			(unsigned)(kni_port_params_array[i]->lcore_tx)))
			rte_exit(EXIT_FAILURE, "lcore id %u for "
				"port %d transmitting not enabled\n",
				kni_port_params_array[i]->lcore_tx,
				kni_port_params_array[i]->port_id);
/* Long-option name for the per-port configuration tuples */
#define CMDLINE_OPT_CONFIG "config"

/* Parse the arguments given in the command line of the application */
parse_args(int argc, char **argv)
	int opt, longindex, ret = 0;
	const char *prgname = argv[0];
	static struct option longopts[] = {
		{CMDLINE_OPT_CONFIG, required_argument, NULL, 0},

	/* Disable printing messages within getopt() */

	/* Parse command line: -p <hex portmask>, -P, --config (...) */
	while ((opt = getopt_long(argc, argv, "p:P", longopts,
		&longindex)) != EOF) {
			ports_mask = parse_unsigned(optarg);
			if (!strncmp(longopts[longindex].name,
				sizeof(CMDLINE_OPT_CONFIG))) {
				ret = parse_config(optarg);
					printf("Invalid config\n");
					print_usage(prgname);
			print_usage(prgname);
			rte_exit(EXIT_FAILURE, "Invalid option specified\n");

	/* Check that options were parsed ok */
	if (validate_parameters(ports_mask) < 0) {
		print_usage(prgname);
		rte_exit(EXIT_FAILURE, "Invalid parameters\n");
/* Initialize KNI subsystem */
	unsigned int num_of_kni_ports = 0, i;
	struct kni_port_params **params = kni_port_params_array;

	/* Calculate the maximum number of KNI interfaces that will be used:
	 * one per kernel-thread lcore, or one per port if none configured. */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (kni_port_params_array[i]) {
			num_of_kni_ports += (params[i]->nb_lcore_k ?
				params[i]->nb_lcore_k : 1);

	/* Invoke rte KNI init to preallocate the ports */
	rte_kni_init(num_of_kni_ports);
/* Initialise a single port on an Ethernet device: one RX and one TX queue,
 * then start the port and optionally enable promiscuous mode. */
init_port(uint16_t port)
	uint16_t nb_rxd = NB_RXD;
	uint16_t nb_txd = NB_TXD;

	/* Initialise device and RX/TX queues */
	RTE_LOG(INFO, APP, "Initialising port %u ...\n", (unsigned)port);
	ret = rte_eth_dev_configure(port, 1, 1, &port_conf);
		rte_exit(EXIT_FAILURE, "Could not configure port%u (%d)\n",
			(unsigned)port, ret);

	/* Let the driver clamp descriptor counts to its supported range */
	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
		rte_exit(EXIT_FAILURE, "Could not adjust number of descriptors "
			"for port%u (%d)\n", (unsigned)port, ret);

	ret = rte_eth_rx_queue_setup(port, 0, nb_rxd,
		rte_eth_dev_socket_id(port), NULL, pktmbuf_pool);
		rte_exit(EXIT_FAILURE, "Could not setup up RX queue for "
			"port%u (%d)\n", (unsigned)port, ret);

	ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
		rte_eth_dev_socket_id(port), NULL);
		rte_exit(EXIT_FAILURE, "Could not setup up TX queue for "
			"port%u (%d)\n", (unsigned)port, ret);

	ret = rte_eth_dev_start(port);
		rte_exit(EXIT_FAILURE, "Could not start port%u (%d)\n",
			(unsigned)port, ret);

	/* Only enabled when the user passed -P */
	rte_eth_promiscuous_enable(port);
/* Check the link status of all ports in up to 9s, and print them finally */
check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status\n");
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					/* NOTE(review): the stray "\n" inside the
					 * "half-duplex" literal looks unintended;
					 * left unchanged here as it is runtime text. */
					"Port%d Link Up - speed %uMbps - %s\n",
					portid, link.link_speed,
					(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex\n"));
					printf("Port %d Link Down\n", portid);

			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {

		/* after finally printing all link status, get out */
		if (all_ports_up == 0) {
			rte_delay_ms(CHECK_INTERVAL);

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/* Callback for request of changing MTU: stop the port, reconfigure the
 * max RX packet length (enabling jumbo frames if needed), and restart. */
kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
	struct rte_eth_conf conf;

	if (port_id >= rte_eth_dev_count()) {
		RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);

	RTE_LOG(INFO, APP, "Change MTU of port %d to %u\n", port_id, new_mtu);

	/* Stop specific port */
	rte_eth_dev_stop(port_id);

	/* Start from the global template config, then adjust rxmode */
	memcpy(&conf, &port_conf, sizeof(conf));
	if (new_mtu > ETHER_MAX_LEN)
		conf.rxmode.jumbo_frame = 1;
		conf.rxmode.jumbo_frame = 0;

	/* mtu + length of header + length of FCS = max pkt length */
	conf.rxmode.max_rx_pkt_len = new_mtu + KNI_ENET_HEADER_SIZE +
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
		RTE_LOG(ERR, APP, "Fail to reconfigure port %d\n", port_id);

	/* Restart specific port */
	ret = rte_eth_dev_start(port_id);
		RTE_LOG(ERR, APP, "Fail to restart port %d\n", port_id);
/* Callback for request of configuring network interface up/down */
kni_config_network_interface(uint16_t port_id, uint8_t if_up)
	if (port_id >= rte_eth_dev_count() || port_id >= RTE_MAX_ETHPORTS) {
		RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);

	RTE_LOG(INFO, APP, "Configure network interface of %d %s\n",
		port_id, if_up ? "up" : "down");

	if (if_up != 0) { /* Configure network interface up */
		/* Restart cycle: stop first so start reapplies the config */
		rte_eth_dev_stop(port_id);
		ret = rte_eth_dev_start(port_id);
	} else /* Configure network interface down */
		rte_eth_dev_stop(port_id);

		/* NOTE(review): this error message refers to "start" only;
		 * confirm against the full file that the down path cannot
		 * reach it. */
		RTE_LOG(ERR, APP, "Failed to start port %d\n", port_id);
/*
 * Create the KNI device(s) for one port: one per configured kernel-thread
 * lcore, or a single device otherwise. The first device registers the
 * MTU/if-up callbacks; the rest are plain slaves.
 */
kni_alloc(uint16_t port_id)
	struct rte_kni_conf conf;
	struct kni_port_params **params = kni_port_params_array;

	if (port_id >= RTE_MAX_ETHPORTS || !params[port_id])

	params[port_id]->nb_kni = params[port_id]->nb_lcore_k ?
		params[port_id]->nb_lcore_k : 1;

	for (i = 0; i < params[port_id]->nb_kni; i++) {
		/* Clear conf at first */
		memset(&conf, 0, sizeof(conf));
		if (params[port_id]->nb_lcore_k) {
			/* Multi-kthread mode: name encodes port and index,
			 * and the kernel thread is pinned to lcore_k[i] */
			snprintf(conf.name, RTE_KNI_NAMESIZE,
				"vEth%u_%u", port_id, i);
			conf.core_id = params[port_id]->lcore_k[i];
			snprintf(conf.name, RTE_KNI_NAMESIZE,
		conf.group_id = port_id;
		conf.mbuf_size = MAX_PACKET_SZ;
		/*
		 * The first KNI device associated to a port
		 * is the master, for multiple kernel thread
		 */
			struct rte_kni_ops ops;
			struct rte_eth_dev_info dev_info;

			memset(&dev_info, 0, sizeof(dev_info));
			rte_eth_dev_info_get(port_id, &dev_info);

			/* Pass through PCI address/id when backed by a PCI device */
			if (dev_info.pci_dev) {
				conf.addr = dev_info.pci_dev->addr;
				conf.id = dev_info.pci_dev->id;

			memset(&ops, 0, sizeof(ops));
			ops.port_id = port_id;
			ops.change_mtu = kni_change_mtu;
			ops.config_network_if = kni_config_network_interface;

			kni = rte_kni_alloc(pktmbuf_pool, &conf, &ops);
			kni = rte_kni_alloc(pktmbuf_pool, &conf, NULL);

			rte_exit(EXIT_FAILURE, "Fail to create kni for "
				"port: %d\n", port_id);
		params[port_id]->kni[i] = kni;
/* Release all KNI devices of one port and stop the underlying eth device. */
kni_free_kni(uint16_t port_id)
	struct kni_port_params **p = kni_port_params_array;

	if (port_id >= RTE_MAX_ETHPORTS || !p[port_id])

	for (i = 0; i < p[port_id]->nb_kni; i++) {
		if (rte_kni_release(p[port_id]->kni[i]))
			printf("Fail to release kni\n");
		p[port_id]->kni[i] = NULL;
	rte_eth_dev_stop(port_id);
/* Initialise ports/queues etc. and start main loop on each core */
main(int argc, char** argv)
	uint16_t nb_sys_ports, port;

	/* Associate signal_handler function with USR/RTMIN/INT signals */
	signal(SIGUSR1, signal_handler);
	signal(SIGUSR2, signal_handler);
	signal(SIGRTMIN, signal_handler);
	signal(SIGINT, signal_handler);

	/* Initialise EAL; consumes the EAL portion of argv */
	ret = rte_eal_init(argc, argv);
		rte_exit(EXIT_FAILURE, "Could not initialise EAL (%d)\n", ret);

	/* Parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
		rte_exit(EXIT_FAILURE, "Could not parse input parameters\n");

	/* Create the mbuf pool */
	pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
		MEMPOOL_CACHE_SZ, 0, MBUF_DATA_SZ, rte_socket_id());
	if (pktmbuf_pool == NULL) {
		rte_exit(EXIT_FAILURE, "Could not initialise mbuf pool\n");

	/* Get number of ports found in scan */
	nb_sys_ports = rte_eth_dev_count();
	if (nb_sys_ports == 0)
		rte_exit(EXIT_FAILURE, "No supported Ethernet device found\n");

	/* Check if the configured port ID is valid */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		if (kni_port_params_array[i] && i >= nb_sys_ports)
			rte_exit(EXIT_FAILURE, "Configured invalid "

	/* Initialize KNI subsystem */

	/* Initialise each port */
	for (port = 0; port < nb_sys_ports; port++) {
		/* Skip ports that are not enabled */
		if (!(ports_mask & (1 << port)))
		if (port >= RTE_MAX_ETHPORTS)
			rte_exit(EXIT_FAILURE, "Can not use more than "
				"%d ports for kni\n", RTE_MAX_ETHPORTS);

	check_all_ports_link_status(nb_sys_ports, ports_mask);

	/* Launch per-lcore function on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(i) {
		if (rte_eal_wait_lcore(i) < 0)

	/* Release resources */
	for (port = 0; port < nb_sys_ports; port++) {
		if (!(ports_mask & (1 << port)))
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		if (kni_port_params_array[i]) {
			rte_free(kni_port_params_array[i]);
			kni_port_params_array[i] = NULL;