1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
10 #include <sys/queue.h>
15 #include <netinet/in.h>
17 #include <linux/if_tun.h>
19 #include <sys/ioctl.h>
23 #include <rte_common.h>
25 #include <rte_memory.h>
26 #include <rte_memcpy.h>
28 #include <rte_per_lcore.h>
29 #include <rte_launch.h>
30 #include <rte_atomic.h>
31 #include <rte_lcore.h>
32 #include <rte_branch_prediction.h>
33 #include <rte_interrupts.h>
34 #include <rte_bus_pci.h>
35 #include <rte_debug.h>
36 #include <rte_ether.h>
37 #include <rte_ethdev.h>
38 #include <rte_mempool.h>
40 #include <rte_string_fns.h>
41 #include <rte_cycles.h>
42 #include <rte_malloc.h>
45 /* Macros for printing using RTE_LOG */
46 #define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1
48 /* Max size of a single packet */
49 #define MAX_PACKET_SZ 2048
51 /* Size of the data buffer in each mbuf */
52 #define MBUF_DATA_SZ (MAX_PACKET_SZ + RTE_PKTMBUF_HEADROOM)
54 /* Number of mbufs in mempool that is created */
55 #define NB_MBUF (8192 * 16)
57 /* How many packets to attempt to read from NIC in one go */
58 #define PKT_BURST_SZ 32
60 /* How many objects (mbufs) to keep in per-lcore mempool cache */
61 #define MEMPOOL_CACHE_SZ PKT_BURST_SZ
63 /* Number of RX ring descriptors */
66 /* Number of TX ring descriptors */
69 /* Total octets in ethernet header */
70 #define KNI_ENET_HEADER_SIZE 14
72 /* Total octets in the FCS */
73 #define KNI_ENET_FCS_SIZE 4
75 #define KNI_US_PER_SECOND 1000000
76 #define KNI_SECOND_PER_DAY 86400
78 #define KNI_MAX_KTHREAD 32
80 * Structure of port parameters
/*
 * Per-port KNI configuration: which lcores service RX/TX for the port and
 * the set of KNI devices (one per kernel thread) created for it.
 * Cache-line aligned so concurrently-accessed per-port entries do not
 * share a cache line.
 */
82 struct kni_port_params {
83 uint16_t port_id;/* Port ID */
84 unsigned lcore_rx; /* lcore ID for RX */
85 unsigned lcore_tx; /* lcore ID for TX */
86 uint32_t nb_lcore_k; /* Number of lcores for KNI multi kernel threads */
87 uint32_t nb_kni; /* Number of KNI devices to be created */
88 unsigned lcore_k[KNI_MAX_KTHREAD]; /* lcore ID list for kthreads */
89 struct rte_kni *kni[KNI_MAX_KTHREAD]; /* KNI context pointers */
90 } __rte_cache_aligned;
92 static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];
95 /* Options for configuring ethernet port */
96 static struct rte_eth_conf port_conf = {
98 .mq_mode = ETH_MQ_TX_NONE,
102 /* Mempool for mbufs */
103 static struct rte_mempool * pktmbuf_pool = NULL;
105 /* Mask of enabled ports */
106 static uint32_t ports_mask = 0;
107 /* Ports set in promiscuous mode off by default. */
108 static int promiscuous_on = 0;
109 /* Monitor link status continually. off by default. */
110 static int monitor_links;
112 /* Structure type for recording kni interface specific stats */
113 struct kni_interface_stats {
114 /* number of pkts received from NIC, and sent to KNI */
117 /* number of pkts received from NIC, but failed to send to KNI */
120 /* number of pkts received from KNI, and sent to NIC */
123 /* number of pkts received from KNI, but failed to send to NIC */
127 /* kni device statistics array */
128 static struct kni_interface_stats kni_stats[RTE_MAX_ETHPORTS];
130 static int kni_change_mtu(uint16_t port_id, unsigned int new_mtu);
131 static int kni_config_network_interface(uint16_t port_id, uint8_t if_up);
132 static int kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[]);
134 static rte_atomic32_t kni_stop = RTE_ATOMIC32_INIT(0);
135 static rte_atomic32_t kni_pause = RTE_ATOMIC32_INIT(0);
137 /* Print out statistics on packets handled */
/*
 * Body of the statistics printer (function header is outside this view):
 * dumps a fixed-width table of per-port RX/TX packet and drop counters
 * from kni_stats[], skipping ports with no configuration entry.
 */
143 printf("\n**KNI example application statistics**\n"
144 "====== ============== ============ ============ ============ ============\n"
145 " Port Lcore(RX/TX) rx_packets rx_dropped tx_packets tx_dropped\n"
146 "------ -------------- ------------ ------------ ------------ ------------\n");
147 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
/* Only ports configured via --config have a params entry. */
148 if (!kni_port_params_array[i])
151 printf("%7d %10u/%2u %13"PRIu64" %13"PRIu64" %13"PRIu64" "
153 kni_port_params_array[i]->lcore_rx,
154 kni_port_params_array[i]->lcore_tx,
155 kni_stats[i].rx_packets,
156 kni_stats[i].rx_dropped,
157 kni_stats[i].tx_packets,
158 kni_stats[i].tx_dropped);
160 printf("====== ============== ============ ============ ============ ============\n");
163 /* Custom handling of signals to handle stats and kni processing */
/*
 * Signal dispatcher registered from main():
 *  - SIGUSR1: print statistics (handler body partially elided here)
 *  - SIGUSR2: zero the kni_stats counters
 *  - SIGRTMIN/SIGINT: raise the kni_stop atomic, which the lcore main
 *    loops poll to terminate processing.
 */
165 signal_handler(int signum)
167 /* When we receive a USR1 signal, print stats */
168 if (signum == SIGUSR1) {
172 /* When we receive a USR2 signal, reset stats */
173 if (signum == SIGUSR2) {
174 memset(&kni_stats, 0, sizeof(kni_stats));
175 printf("\n** Statistics have been reset **\n");
179 /* When we receive a RTMIN or SIGINT signal, stop kni processing */
180 if (signum == SIGRTMIN || signum == SIGINT){
181 printf("\nSIGRTMIN/SIGINT received. KNI processing stopping.\n");
/* Incremented (not set) — readers treat any non-zero value as "stop". */
182 rte_atomic32_inc(&kni_stop);
/* Free the first 'num' mbufs of a burst that could not be forwarded. */
188 kni_burst_free_mbufs(struct rte_mbuf **pkts, unsigned num)
195 for (i = 0; i < num; i++) {
196 rte_pktmbuf_free(pkts[i]);
202 * Interface to burst rx and enqueue mbufs into rx_q
/*
 * NIC -> kernel direction: burst-receive packets from the port's RX
 * queue 0 and inject them into each of the port's KNI devices.  Mbufs
 * the kernel side cannot accept are freed and counted as rx_dropped.
 */
205 kni_ingress(struct kni_port_params *p)
211 struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
217 port_id = p->port_id;
218 for (i = 0; i < nb_kni; i++) {
219 /* Burst rx from eth */
220 nb_rx = rte_eth_rx_burst(port_id, 0, pkts_burst, PKT_BURST_SZ);
/* Defensive check — rx_burst never returns more than requested. */
221 if (unlikely(nb_rx > PKT_BURST_SZ)) {
222 RTE_LOG(ERR, APP, "Error receiving from eth\n");
225 /* Burst tx to kni */
226 num = rte_kni_tx_burst(p->kni[i], pkts_burst, nb_rx);
228 kni_stats[port_id].rx_packets += num;
/* Service pending kernel requests (MTU change, if up/down, ...). */
230 rte_kni_handle_request(p->kni[i]);
231 if (unlikely(num < nb_rx)) {
232 /* Free mbufs not tx to kni interface */
233 kni_burst_free_mbufs(&pkts_burst[num], nb_rx - num);
234 kni_stats[port_id].rx_dropped += nb_rx - num;
240 * Interface to dequeue mbufs from tx_q and burst tx
/*
 * Kernel -> NIC direction: burst-dequeue packets from each KNI device
 * and transmit them on the port's TX queue 0.  Mbufs the NIC cannot
 * accept are freed and counted as tx_dropped.
 */
243 kni_egress(struct kni_port_params *p)
249 struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
255 port_id = p->port_id;
256 for (i = 0; i < nb_kni; i++) {
257 /* Burst rx from kni */
258 num = rte_kni_rx_burst(p->kni[i], pkts_burst, PKT_BURST_SZ);
/* Defensive check — rx_burst never returns more than requested. */
259 if (unlikely(num > PKT_BURST_SZ)) {
260 RTE_LOG(ERR, APP, "Error receiving from KNI\n");
263 /* Burst tx to eth */
264 nb_tx = rte_eth_tx_burst(port_id, 0, pkts_burst, (uint16_t)num);
266 kni_stats[port_id].tx_packets += nb_tx;
267 if (unlikely(nb_tx < num)) {
268 /* Free mbufs not tx to NIC */
269 kni_burst_free_mbufs(&pkts_burst[nb_tx], num - nb_tx);
270 kni_stats[port_id].tx_dropped += num - nb_tx;
/*
 * Per-lcore worker entry (launched on every lcore from main): scans the
 * port configuration to find whether this lcore is an RX or TX lcore for
 * some port, then loops calling kni_ingress()/kni_egress() for that port
 * until the kni_stop atomic is raised.  The kni_pause atomic
 * (read below, use elided in this view) presumably gates processing
 * during interface reconfiguration — confirm against full source.
 */
276 main_loop(__rte_unused void *arg)
281 const unsigned lcore_id = rte_lcore_id();
288 enum lcore_rxtx flag = LCORE_NONE;
290 RTE_ETH_FOREACH_DEV(i) {
291 if (!kni_port_params_array[i])
/* NOTE(review): lcore_rx is 'unsigned' but compared via (uint8_t) cast
 * here — lcore IDs above 255 would alias; verify against full source. */
293 if (kni_port_params_array[i]->lcore_rx == (uint8_t)lcore_id) {
296 } else if (kni_port_params_array[i]->lcore_tx ==
303 if (flag == LCORE_RX) {
304 RTE_LOG(INFO, APP, "Lcore %u is reading from port %d\n",
305 kni_port_params_array[i]->lcore_rx,
306 kni_port_params_array[i]->port_id);
308 f_stop = rte_atomic32_read(&kni_stop);
309 f_pause = rte_atomic32_read(&kni_pause);
314 kni_ingress(kni_port_params_array[i]);
316 } else if (flag == LCORE_TX) {
317 RTE_LOG(INFO, APP, "Lcore %u is writing to port %d\n",
318 kni_port_params_array[i]->lcore_tx,
319 kni_port_params_array[i]->port_id);
321 f_stop = rte_atomic32_read(&kni_stop);
322 f_pause = rte_atomic32_read(&kni_pause);
327 kni_egress(kni_port_params_array[i]);
330 RTE_LOG(INFO, APP, "Lcore %u has nothing to do\n", lcore_id);
335 /* Display usage instructions */
/* Print command-line usage: -p portmask, -P promiscuous, -m link monitor,
 * and the --config (port,lcore_rx,lcore_tx[,lcore_kthread...]) syntax. */
337 print_usage(const char *prgname)
339 RTE_LOG(INFO, APP, "\nUsage: %s [EAL options] -- -p PORTMASK -P -m "
340 "[--config (port,lcore_rx,lcore_tx,lcore_kthread...)"
341 "[,(port,lcore_rx,lcore_tx,lcore_kthread...)]]\n"
342 " -p PORTMASK: hex bitmask of ports to use\n"
343 " -P : enable promiscuous mode\n"
344 " -m : enable monitoring of port carrier state\n"
345 " --config (port,lcore_rx,lcore_tx,lcore_kthread...): "
346 "port and lcore configurations\n",
350 /* Convert string to unsigned number. 0 is returned if error occurs */
/* Parse a hexadecimal string (base 16) into a uint32_t.
 * Returns 0 on malformed input (per the comment preceding this view). */
352 parse_unsigned(const char *portmask)
357 num = strtoul(portmask, &end, 16);
/* Reject empty strings and trailing garbage after the number. */
358 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
361 return (uint32_t)num;
/* Debug dump of the parsed per-port configuration: port ID, RX/TX lcores
 * and the kernel-thread lcore list (enclosing function header elided). */
368 struct kni_port_params **p = kni_port_params_array;
370 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
373 RTE_LOG(DEBUG, APP, "Port ID: %d\n", p[i]->port_id);
374 RTE_LOG(DEBUG, APP, "Rx lcore ID: %u, Tx lcore ID: %u\n",
375 p[i]->lcore_rx, p[i]->lcore_tx);
376 for (j = 0; j < p[i]->nb_lcore_k; j++)
377 RTE_LOG(DEBUG, APP, "Kernel thread lcore ID: %u\n",
/*
 * Parse the --config option: a sequence of parenthesised tuples
 * "(port,lcore_rx,lcore_tx[,lcore_kthread...])".  For each tuple this
 * allocates and fills a kni_port_params entry, validating the port ID,
 * duplicate configuration, and lcore ID ranges.  Error paths
 * (elided here) presumably free partial allocations — confirm against
 * the full source.
 */
383 parse_config(const char *arg)
385 const char *p, *p0 = arg;
/* _NUM_FLD = port + rx lcore + tx lcore + up to KNI_MAX_KTHREAD kthreads. */
392 _NUM_FLD = KNI_MAX_KTHREAD + 3,
395 char *str_fld[_NUM_FLD];
396 unsigned long int_fld[_NUM_FLD];
397 uint16_t port_id, nb_kni_port_params = 0;
399 memset(&kni_port_params_array, 0, sizeof(kni_port_params_array));
400 while (((p = strchr(p0, '(')) != NULL) &&
401 nb_kni_port_params < RTE_MAX_ETHPORTS) {
/* Each tuple must be closed by ')'; p0 advances past it next round. */
403 if ((p0 = strchr(p, ')')) == NULL)
406 if (size >= sizeof(s)) {
407 printf("Invalid config parameters\n");
410 snprintf(s, sizeof(s), "%.*s", size, p);
411 nb_token = rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',');
/* At minimum port, lcore_rx and lcore_tx must be present. */
412 if (nb_token <= FLD_LCORE_TX) {
413 printf("Invalid config parameters\n");
416 for (i = 0; i < nb_token; i++) {
418 int_fld[i] = strtoul(str_fld[i], &end, 0);
419 if (errno != 0 || end == str_fld[i]) {
420 printf("Invalid config parameters\n");
426 port_id = int_fld[i++];
427 if (port_id >= RTE_MAX_ETHPORTS) {
428 printf("Port ID %d could not exceed the maximum %d\n",
429 port_id, RTE_MAX_ETHPORTS);
432 if (kni_port_params_array[port_id]) {
433 printf("Port %d has been configured\n", port_id);
/* NOTE(review): rte_zmalloc return is not visibly NULL-checked before
 * the dereference below — verify against full source. */
436 kni_port_params_array[port_id] =
437 rte_zmalloc("KNI_port_params",
438 sizeof(struct kni_port_params), RTE_CACHE_LINE_SIZE);
439 kni_port_params_array[port_id]->port_id = port_id;
440 kni_port_params_array[port_id]->lcore_rx =
441 (uint8_t)int_fld[i++];
442 kni_port_params_array[port_id]->lcore_tx =
443 (uint8_t)int_fld[i++];
444 if (kni_port_params_array[port_id]->lcore_rx >= RTE_MAX_LCORE ||
445 kni_port_params_array[port_id]->lcore_tx >= RTE_MAX_LCORE) {
446 printf("lcore_rx %u or lcore_tx %u ID could not "
447 "exceed the maximum %u\n",
448 kni_port_params_array[port_id]->lcore_rx,
449 kni_port_params_array[port_id]->lcore_tx,
450 (unsigned)RTE_MAX_LCORE);
/* Remaining tokens are optional kernel-thread lcore IDs. */
453 for (j = 0; i < nb_token && j < KNI_MAX_KTHREAD; i++, j++)
454 kni_port_params_array[port_id]->lcore_k[j] =
456 kni_port_params_array[port_id]->nb_lcore_k = j;
/* Cleanup loop (enclosing context elided — presumably the parse_config
 * error path): free every allocated port-params entry and NULL it. */
463 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
464 if (kni_port_params_array[i]) {
465 rte_free(kni_port_params_array[i]);
466 kni_port_params_array[i] = NULL;
/*
 * Cross-check the port mask against the --config entries: every masked
 * port must be configured and vice versa, and each configured port's RX
 * and TX lcores must be enabled in the EAL coremask.  Any violation
 * terminates the application via rte_exit().
 */
474 validate_parameters(uint32_t portmask)
479 printf("No port configured in port mask\n");
483 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
/* Mask bit and config entry must agree (both set or both clear). */
484 if (((portmask & (1 << i)) && !kni_port_params_array[i]) ||
485 (!(portmask & (1 << i)) && kni_port_params_array[i]))
486 rte_exit(EXIT_FAILURE, "portmask is not consistent "
487 "to port ids specified in --config\n");
489 if (kni_port_params_array[i] && !rte_lcore_is_enabled(\
490 (unsigned)(kni_port_params_array[i]->lcore_rx)))
491 rte_exit(EXIT_FAILURE, "lcore id %u for "
492 "port %d receiving not enabled\n",
493 kni_port_params_array[i]->lcore_rx,
494 kni_port_params_array[i]->port_id);
496 if (kni_port_params_array[i] && !rte_lcore_is_enabled(\
497 (unsigned)(kni_port_params_array[i]->lcore_tx)))
498 rte_exit(EXIT_FAILURE, "lcore id %u for "
499 "port %d transmitting not enabled\n",
500 kni_port_params_array[i]->lcore_tx,
501 kni_port_params_array[i]->port_id);
508 #define CMDLINE_OPT_CONFIG "config"
510 /* Parse the arguments given in the command line of the application */
/*
 * Parse application arguments (after the EAL "--" separator) with
 * getopt_long: -p portmask, -P, -m, and the long option --config.
 * Unknown options print usage and exit; finally the parsed port mask is
 * validated against the per-port configuration.
 */
512 parse_args(int argc, char **argv)
514 int opt, longindex, ret = 0;
515 const char *prgname = argv[0];
516 static struct option longopts[] = {
517 {CMDLINE_OPT_CONFIG, required_argument, NULL, 0},
521 /* Disable printing messages within getopt() */
524 /* Parse command line */
525 while ((opt = getopt_long(argc, argv, "p:Pm", longopts,
526 &longindex)) != EOF) {
529 ports_mask = parse_unsigned(optarg);
/* Long option dispatch: only "config" is defined in longopts. */
538 if (!strncmp(longopts[longindex].name,
540 sizeof(CMDLINE_OPT_CONFIG))) {
541 ret = parse_config(optarg);
543 printf("Invalid config\n");
544 print_usage(prgname);
550 print_usage(prgname);
551 rte_exit(EXIT_FAILURE, "Invalid option specified\n");
555 /* Check that options were parsed ok */
556 if (validate_parameters(ports_mask) < 0) {
557 print_usage(prgname);
558 rte_exit(EXIT_FAILURE, "Invalid parameters\n");
564 /* Initialize KNI subsystem */
/* Count the total number of KNI interfaces required across all
 * configured ports (nb_lcore_k kthreads each, or 1 if none) and ask the
 * KNI subsystem to preallocate them.  (Function header elided.) */
568 unsigned int num_of_kni_ports = 0, i;
569 struct kni_port_params **params = kni_port_params_array;
571 /* Calculate the maximum number of KNI interfaces that will be used */
572 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
573 if (kni_port_params_array[i]) {
574 num_of_kni_ports += (params[i]->nb_lcore_k ?
575 params[i]->nb_lcore_k : 1);
579 /* Invoke rte KNI init to preallocate the ports */
580 rte_kni_init(num_of_kni_ports);
583 /* Initialise a single port on an Ethernet device */
/*
 * Bring up one Ethernet port: query device info, enable fast-free TX
 * offload when supported, configure a single RX and TX queue, adjust
 * descriptor counts, start the port, and optionally enable promiscuous
 * mode.  Every failure terminates via rte_exit().
 */
585 init_port(uint16_t port)
588 uint16_t nb_rxd = NB_RXD;
589 uint16_t nb_txd = NB_TXD;
590 struct rte_eth_dev_info dev_info;
591 struct rte_eth_rxconf rxq_conf;
592 struct rte_eth_txconf txq_conf;
/* Work on a local copy so the template port_conf stays pristine. */
593 struct rte_eth_conf local_port_conf = port_conf;
595 /* Initialise device and RX/TX queues */
596 RTE_LOG(INFO, APP, "Initialising port %u ...\n", (unsigned)port);
599 ret = rte_eth_dev_info_get(port, &dev_info);
601 rte_exit(EXIT_FAILURE,
602 "Error during getting device (port %u) info: %s\n",
603 port, strerror(-ret));
605 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
606 local_port_conf.txmode.offloads |=
607 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
/* One RX queue, one TX queue per port. */
608 ret = rte_eth_dev_configure(port, 1, 1, &local_port_conf);
610 rte_exit(EXIT_FAILURE, "Could not configure port%u (%d)\n",
611 (unsigned)port, ret);
613 ret = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
615 rte_exit(EXIT_FAILURE, "Could not adjust number of descriptors "
616 "for port%u (%d)\n", (unsigned)port, ret);
618 rxq_conf = dev_info.default_rxconf;
619 rxq_conf.offloads = local_port_conf.rxmode.offloads;
620 ret = rte_eth_rx_queue_setup(port, 0, nb_rxd,
621 rte_eth_dev_socket_id(port), &rxq_conf, pktmbuf_pool);
623 rte_exit(EXIT_FAILURE, "Could not setup up RX queue for "
624 "port%u (%d)\n", (unsigned)port, ret);
626 txq_conf = dev_info.default_txconf;
627 txq_conf.offloads = local_port_conf.txmode.offloads;
628 ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
629 rte_eth_dev_socket_id(port), &txq_conf);
631 rte_exit(EXIT_FAILURE, "Could not setup up TX queue for "
632 "port%u (%d)\n", (unsigned)port, ret);
634 ret = rte_eth_dev_start(port);
636 rte_exit(EXIT_FAILURE, "Could not start port%u (%d)\n",
637 (unsigned)port, ret);
639 if (promiscuous_on) {
640 ret = rte_eth_promiscuous_enable(port);
642 rte_exit(EXIT_FAILURE,
643 "Could not enable promiscuous mode for port%u: %s\n",
644 port, rte_strerror(-ret));
648 /* Check the link status of all ports in up to 9s, and print them finally */
/*
 * Poll link status of every masked port, retrying every CHECK_INTERVAL
 * ms for up to MAX_CHECK_TIME rounds (9s total).  Prints per-port status
 * once all links are up or the timeout expires.
 */
650 check_all_ports_link_status(uint32_t port_mask)
652 #define CHECK_INTERVAL 100 /* 100ms */
653 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
655 uint8_t count, all_ports_up, print_flag = 0;
656 struct rte_eth_link link;
659 printf("\nChecking link status\n");
661 for (count = 0; count <= MAX_CHECK_TIME; count++) {
663 RTE_ETH_FOREACH_DEV(portid) {
664 if ((port_mask & (1 << portid)) == 0)
666 memset(&link, 0, sizeof(link));
667 ret = rte_eth_link_get_nowait(portid, &link);
671 printf("Port %u link get failed: %s\n",
672 portid, rte_strerror(-ret));
675 /* print link status if flag set */
676 if (print_flag == 1) {
677 if (link.link_status)
/* NOTE(review): trailing "\n" inside the "half-duplex" literal looks
 * misplaced (Up lines end without it, half-duplex adds a stray one) —
 * compare against upstream DPDK example. */
679 "Port%d Link Up - speed %uMbps - %s\n",
680 portid, link.link_speed,
681 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
682 ("full-duplex") : ("half-duplex\n"));
684 printf("Port %d Link Down\n", portid);
687 /* clear all_ports_up flag if any link down */
688 if (link.link_status == ETH_LINK_DOWN) {
693 /* after finally printing all link status, get out */
697 if (all_ports_up == 0) {
700 rte_delay_ms(CHECK_INTERVAL);
703 /* set the print_flag if all ports up or timeout */
704 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Log a KNI interface's link-state transition: prints an "Up" line with
 * speed/autoneg/duplex when going DOWN->UP, a "Down" line when going
 * UP->DOWN, and nothing when the state is unchanged.
 */
712 log_link_state(struct rte_kni *kni, int prev, struct rte_eth_link *link)
714 if (kni == NULL || link == NULL)
717 if (prev == ETH_LINK_DOWN && link->link_status == ETH_LINK_UP) {
718 RTE_LOG(INFO, APP, "%s NIC Link is Up %d Mbps %s %s.\n",
719 rte_kni_get_name(kni),
721 link->link_autoneg ? "(AutoNeg)" : "(Fixed)",
722 link->link_duplex ? "Full Duplex" : "Half Duplex");
723 } else if (prev == ETH_LINK_UP && link->link_status == ETH_LINK_DOWN) {
724 RTE_LOG(INFO, APP, "%s NIC Link is Down.\n",
725 rte_kni_get_name(kni));
730 * Monitor the link status of all ports and update the
731 * corresponding KNI interface(s)
/*
 * Control-thread body (spawned from main via rte_ctrl_thread_create):
 * while the -m option is enabled, periodically read each masked port's
 * link state, push it to the port's KNI devices via
 * rte_kni_update_link(), and log transitions.
 */
734 monitor_all_ports_link_status(void *arg)
737 struct rte_eth_link link;
739 struct kni_port_params **p = kni_port_params_array;
744 while (monitor_links) {
746 RTE_ETH_FOREACH_DEV(portid) {
747 if ((ports_mask & (1 << portid)) == 0)
749 memset(&link, 0, sizeof(link));
750 ret = rte_eth_link_get_nowait(portid, &link);
753 "Get link failed (port %u): %s\n",
754 portid, rte_strerror(-ret));
757 for (i = 0; i < p[portid]->nb_kni; i++) {
/* rte_kni_update_link returns the previous state, used for logging. */
758 prev = rte_kni_update_link(p[portid]->kni[i],
760 log_link_state(p[portid]->kni[i], prev, &link);
767 /* Callback for request of changing MTU */
/*
 * KNI callback invoked when the kernel requests an MTU change on the
 * virtual interface: stops the port, reconfigures it with jumbo-frame
 * offload toggled by the new MTU, re-creates the RX queue, and restarts
 * the port.  Returns a negative value on failure (error paths elided).
 */
769 kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
772 uint16_t nb_rxd = NB_RXD;
773 struct rte_eth_conf conf;
774 struct rte_eth_dev_info dev_info;
775 struct rte_eth_rxconf rxq_conf;
777 if (!rte_eth_dev_is_valid_port(port_id)) {
778 RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
782 RTE_LOG(INFO, APP, "Change MTU of port %d to %u\n", port_id, new_mtu);
784 /* Stop specific port */
785 rte_eth_dev_stop(port_id);
787 memcpy(&conf, &port_conf, sizeof(conf));
/* Jumbo-frame RX offload tracks whether the MTU exceeds standard max. */
789 if (new_mtu > RTE_ETHER_MAX_LEN)
790 conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
792 conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
794 /* mtu + length of header + length of FCS = max pkt length */
795 conf.rxmode.max_rx_pkt_len = new_mtu + KNI_ENET_HEADER_SIZE +
797 ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
799 RTE_LOG(ERR, APP, "Fail to reconfigure port %d\n", port_id);
803 ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, NULL);
805 rte_exit(EXIT_FAILURE, "Could not adjust number of descriptors "
806 "for port%u (%d)\n", (unsigned int)port_id,
809 ret = rte_eth_dev_info_get(port_id, &dev_info);
812 "Error during getting device (port %u) info: %s\n",
813 port_id, strerror(-ret));
818 rxq_conf = dev_info.default_rxconf;
819 rxq_conf.offloads = conf.rxmode.offloads;
820 ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
821 rte_eth_dev_socket_id(port_id), &rxq_conf, pktmbuf_pool);
823 RTE_LOG(ERR, APP, "Fail to setup Rx queue of port %d\n",
828 /* Restart specific port */
829 ret = rte_eth_dev_start(port_id);
831 RTE_LOG(ERR, APP, "Fail to restart port %d\n", port_id);
838 /* Callback for request of configuring network interface up/down */
/*
 * KNI callback for "ifconfig up/down" on the virtual interface: stops
 * the port, and restarts it only for the "up" case.  The kni_pause
 * atomic is raised around the operation — presumably so the data-path
 * lcores skip processing while the port is being restarted (confirm
 * against main_loop in the full source).
 */
840 kni_config_network_interface(uint16_t port_id, uint8_t if_up)
844 if (!rte_eth_dev_is_valid_port(port_id)) {
845 RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
849 RTE_LOG(INFO, APP, "Configure network interface of %d %s\n",
850 port_id, if_up ? "up" : "down");
852 rte_atomic32_inc(&kni_pause);
854 if (if_up != 0) { /* Configure network interface up */
855 rte_eth_dev_stop(port_id);
856 ret = rte_eth_dev_start(port_id);
857 } else /* Configure network interface down */
858 rte_eth_dev_stop(port_id);
860 rte_atomic32_dec(&kni_pause);
863 RTE_LOG(ERR, APP, "Failed to start port %d\n", port_id);
/* Format a MAC address with rte_ether_format_addr and log it with the
 * given label prefix. */
869 print_ethaddr(const char *name, struct rte_ether_addr *mac_addr)
871 char buf[RTE_ETHER_ADDR_FMT_SIZE];
872 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);
873 RTE_LOG(INFO, APP, "\t%s%s\n", name, buf);
876 /* Callback for request of configuring mac address */
/*
 * KNI callback for a MAC address change requested from the kernel side:
 * logs the new address and applies it as the port's default MAC via
 * rte_eth_dev_default_mac_addr_set.
 */
878 kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[])
882 if (!rte_eth_dev_is_valid_port(port_id)) {
883 RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
887 RTE_LOG(INFO, APP, "Configure mac address of %d\n", port_id);
888 print_ethaddr("Address:", (struct rte_ether_addr *)mac_addr);
890 ret = rte_eth_dev_default_mac_addr_set(port_id,
891 (struct rte_ether_addr *)mac_addr);
893 RTE_LOG(ERR, APP, "Failed to config mac_addr for port %d\n",
/*
 * Create the KNI device(s) for one port: one per configured kernel
 * thread (named "vEthN_M" with a pinned core_id), or a single device
 * otherwise.  The first device of a port is the "master": it carries the
 * port's MAC/MTU and the ops callbacks (change_mtu, config_network_if,
 * config_mac_address); subsequent devices are allocated without ops.
 */
900 kni_alloc(uint16_t port_id)
904 struct rte_kni_conf conf;
905 struct kni_port_params **params = kni_port_params_array;
908 if (port_id >= RTE_MAX_ETHPORTS || !params[port_id])
911 params[port_id]->nb_kni = params[port_id]->nb_lcore_k ?
912 params[port_id]->nb_lcore_k : 1;
914 for (i = 0; i < params[port_id]->nb_kni; i++) {
915 /* Clear conf at first */
916 memset(&conf, 0, sizeof(conf));
917 if (params[port_id]->nb_lcore_k) {
918 snprintf(conf.name, RTE_KNI_NAMESIZE,
919 "vEth%u_%u", port_id, i);
/* Pin this KNI's kernel thread to the configured lcore. */
920 conf.core_id = params[port_id]->lcore_k[i];
923 snprintf(conf.name, RTE_KNI_NAMESIZE,
925 conf.group_id = port_id;
926 conf.mbuf_size = MAX_PACKET_SZ;
928 * The first KNI device associated to a port
929 * is the master, for multiple kernel thread
933 struct rte_kni_ops ops;
934 struct rte_eth_dev_info dev_info;
936 ret = rte_eth_dev_info_get(port_id, &dev_info);
938 rte_exit(EXIT_FAILURE,
939 "Error during getting device (port %u) info: %s\n",
940 port_id, strerror(-ret));
942 /* Get the interface default mac address */
943 ret = rte_eth_macaddr_get(port_id,
944 (struct rte_ether_addr *)&conf.mac_addr);
946 rte_exit(EXIT_FAILURE,
947 "Failed to get MAC address (port %u): %s\n",
948 port_id, rte_strerror(-ret));
950 rte_eth_dev_get_mtu(port_id, &conf.mtu);
952 conf.min_mtu = dev_info.min_mtu;
953 conf.max_mtu = dev_info.max_mtu;
955 memset(&ops, 0, sizeof(ops));
956 ops.port_id = port_id;
957 ops.change_mtu = kni_change_mtu;
958 ops.config_network_if = kni_config_network_interface;
959 ops.config_mac_address = kni_config_mac_address;
961 kni = rte_kni_alloc(pktmbuf_pool, &conf, &ops);
/* Non-master devices: no ops, requests handled by the master. */
963 kni = rte_kni_alloc(pktmbuf_pool, &conf, NULL);
966 rte_exit(EXIT_FAILURE, "Fail to create kni for "
967 "port: %d\n", port_id);
968 params[port_id]->kni[i] = kni;
/* Release every KNI device of a port, NULL the stored handles, and stop
 * the underlying Ethernet port. */
975 kni_free_kni(uint16_t port_id)
978 struct kni_port_params **p = kni_port_params_array;
980 if (port_id >= RTE_MAX_ETHPORTS || !p[port_id])
983 for (i = 0; i < p[port_id]->nb_kni; i++) {
984 if (rte_kni_release(p[port_id]->kni[i]))
985 printf("Fail to release kni\n");
986 p[port_id]->kni[i] = NULL;
988 rte_eth_dev_stop(port_id);
993 /* Initialise ports/queues etc. and start main loop on each core */
/*
 * Application entry point: install signal handlers, init EAL, parse app
 * arguments, create the mbuf pool, validate configured ports, init the
 * KNI subsystem and each enabled port, optionally spawn the link-status
 * monitor thread, launch main_loop on all lcores, then on shutdown join
 * the monitor thread and free per-port resources.
 */
995 main(int argc, char** argv)
998 uint16_t nb_sys_ports, port;
1001 pthread_t kni_link_tid;
1004 /* Associate signal_hanlder function with USR signals */
1005 signal(SIGUSR1, signal_handler);
1006 signal(SIGUSR2, signal_handler);
1007 signal(SIGRTMIN, signal_handler);
1008 signal(SIGINT, signal_handler);
1010 /* Initialise EAL */
1011 ret = rte_eal_init(argc, argv);
1013 rte_exit(EXIT_FAILURE, "Could not initialise EAL (%d)\n", ret);
1017 /* Parse application arguments (after the EAL ones) */
1018 ret = parse_args(argc, argv);
1020 rte_exit(EXIT_FAILURE, "Could not parse input parameters\n");
1022 /* Create the mbuf pool */
1023 pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
1024 MEMPOOL_CACHE_SZ, 0, MBUF_DATA_SZ, rte_socket_id());
1025 if (pktmbuf_pool == NULL) {
1026 rte_exit(EXIT_FAILURE, "Could not initialise mbuf pool\n");
1030 /* Get number of ports found in scan */
1031 nb_sys_ports = rte_eth_dev_count_avail();
1032 if (nb_sys_ports == 0)
1033 rte_exit(EXIT_FAILURE, "No supported Ethernet device found\n");
1035 /* Check if the configured port ID is valid */
1036 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
1037 if (kni_port_params_array[i] && !rte_eth_dev_is_valid_port(i))
1038 rte_exit(EXIT_FAILURE, "Configured invalid "
1041 /* Initialize KNI subsystem */
1044 /* Initialise each port */
1045 RTE_ETH_FOREACH_DEV(port) {
1046 /* Skip ports that are not enabled */
1047 if (!(ports_mask & (1 << port)))
1051 if (port >= RTE_MAX_ETHPORTS)
1052 rte_exit(EXIT_FAILURE, "Can not use more than "
1053 "%d ports for kni\n", RTE_MAX_ETHPORTS);
1057 check_all_ports_link_status(ports_mask);
1060 RTE_LOG(INFO, APP, "========================\n");
1061 RTE_LOG(INFO, APP, "KNI Running\n");
1062 RTE_LOG(INFO, APP, "kill -SIGUSR1 %d\n", pid);
1063 RTE_LOG(INFO, APP, " Show KNI Statistics.\n");
1064 RTE_LOG(INFO, APP, "kill -SIGUSR2 %d\n", pid);
1065 RTE_LOG(INFO, APP, " Zero KNI Statistics.\n");
1066 RTE_LOG(INFO, APP, "========================\n");
/* Link-monitor control thread runs monitor_all_ports_link_status. */
1069 ret = rte_ctrl_thread_create(&kni_link_tid,
1070 "KNI link status check", NULL,
1071 monitor_all_ports_link_status, NULL);
1073 rte_exit(EXIT_FAILURE,
1074 "Could not create link status thread!\n");
1076 /* Launch per-lcore function on every lcore */
1077 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
1078 RTE_LCORE_FOREACH_SLAVE(i) {
1079 if (rte_eal_wait_lcore(i) < 0)
1083 pthread_join(kni_link_tid, &retval);
1085 /* Release resources */
1086 RTE_ETH_FOREACH_DEV(port) {
1087 if (!(ports_mask & (1 << port)))
1091 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
1092 if (kni_port_params_array[i]) {
1093 rte_free(kni_port_params_array[i]);
1094 kni_port_params_array[i] = NULL;