4 * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <errno.h>
#include <limits.h>
#include <stdint.h>

#include <sys/queue.h>
#include <sys/ioctl.h>
#include <netinet/in.h>
#include <linux/if_tun.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
78 /* Macros for printing using RTE_LOG */
79 #define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1
81 /* Max size of a single packet */
82 #define MAX_PACKET_SZ 2048
84 /* Number of bytes needed for each mbuf */
86 (MAX_PACKET_SZ + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
88 /* Number of mbufs in mempool that is created */
89 #define NB_MBUF (8192 * 16)
91 /* How many packets to attempt to read from NIC in one go */
92 #define PKT_BURST_SZ 32
94 /* How many objects (mbufs) to keep in per-lcore mempool cache */
95 #define MEMPOOL_CACHE_SZ PKT_BURST_SZ
97 /* Number of RX ring descriptors */
100 /* Number of TX ring descriptors */
103 /* Total octets in ethernet header */
104 #define KNI_ENET_HEADER_SIZE 14
106 /* Total octets in the FCS */
107 #define KNI_ENET_FCS_SIZE 4
109 #define KNI_US_PER_SECOND 1000000
110 #define KNI_SECOND_PER_DAY 86400
112 #define KNI_MAX_KTHREAD 32
114 * Structure of port parameters
/* Per-port configuration: which lcores service the port's RX/TX paths and
 * the KNI contexts created for it. One instance per enabled ethernet port,
 * allocated in parse_config() and stored in kni_port_params_array[]. */
struct kni_port_params {
uint8_t port_id;/* Port ID */
unsigned lcore_rx; /* lcore ID for RX */
unsigned lcore_tx; /* lcore ID for TX */
uint32_t nb_lcore_k; /* Number of lcores for KNI multi kernel threads */
uint32_t nb_kni; /* Number of KNI devices to be created */
unsigned lcore_k[KNI_MAX_KTHREAD]; /* lcore ID list for kthreads */
struct rte_kni *kni[KNI_MAX_KTHREAD]; /* KNI context pointers */
} __rte_cache_aligned;
126 static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];
128 /* RX and TX Prefetch, Host, and Write-back threshold values should be
129 * carefully set for optimal performance. Consult the network
130 * controller's datasheet and supporting DPDK documentation for guidance
131 * on how these parameters should be set.
133 /* RX ring configuration */
134 static const struct rte_eth_rxconf rx_conf = {
136 .pthresh = 8, /* Ring prefetch threshold */
137 .hthresh = 8, /* Ring host threshold */
138 .wthresh = 4, /* Ring writeback threshold */
140 .rx_free_thresh = 0, /* Immediately free RX descriptors */
144 * These default values are optimized for use with the Intel(R) 82599 10 GbE
145 * Controller and the DPDK ixgbe PMD. Consider using other values for other
146 * network controllers and/or network drivers.
148 /* TX ring configuration */
149 static const struct rte_eth_txconf tx_conf = {
151 .pthresh = 36, /* Ring prefetch threshold */
152 .hthresh = 0, /* Ring host threshold */
153 .wthresh = 0, /* Ring writeback threshold */
155 .tx_free_thresh = 0, /* Use PMD default values */
156 .tx_rs_thresh = 0, /* Use PMD default values */
159 /* Options for configuring ethernet port */
160 static struct rte_eth_conf port_conf = {
162 .header_split = 0, /* Header Split disabled */
163 .hw_ip_checksum = 0, /* IP checksum offload disabled */
164 .hw_vlan_filter = 0, /* VLAN filtering disabled */
165 .jumbo_frame = 0, /* Jumbo Frame Support disabled */
166 .hw_strip_crc = 0, /* CRC stripped by hardware */
169 .mq_mode = ETH_MQ_TX_NONE,
173 /* Mempool for mbufs */
174 static struct rte_mempool * pktmbuf_pool = NULL;
176 /* Mask of enabled ports */
177 static uint32_t ports_mask = 0;
178 /* Ports set in promiscuous mode off by default. */
179 static int promiscuous_on = 0;
181 /* Structure type for recording kni interface specific stats */
182 struct kni_interface_stats {
183 /* number of pkts received from NIC, and sent to KNI */
186 /* number of pkts received from NIC, but failed to send to KNI */
189 /* number of pkts received from KNI, and sent to NIC */
192 /* number of pkts received from KNI, but failed to send to NIC */
196 /* kni device statistics array */
197 static struct kni_interface_stats kni_stats[RTE_MAX_ETHPORTS];
199 static int kni_change_mtu(uint8_t port_id, unsigned new_mtu);
200 static int kni_config_network_interface(uint8_t port_id, uint8_t if_up);
202 static rte_atomic32_t kni_stop = RTE_ATOMIC32_INIT(0);
/* Print out statistics on packets handled */
/* Dumps the kni_stats[] counters (maintained by kni_ingress/kni_egress)
 * for every configured port. The SIGUSR1 branch in signal_handler()
 * suggests this runs on that signal — confirm against the elided lines. */
printf("\n**KNI example application statistics**\n"
"====== ============== ============ ============ ============ ============\n"
" Port Lcore(RX/TX) rx_packets rx_dropped tx_packets tx_dropped\n"
"------ -------------- ------------ ------------ ------------ ------------\n");
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
/* Skip ports that were not configured via --config */
if (!kni_port_params_array[i])
printf("%7d %10u/%2u %13"PRIu64" %13"PRIu64" %13"PRIu64" "
kni_port_params_array[i]->lcore_rx,
kni_port_params_array[i]->lcore_tx,
kni_stats[i].rx_packets,
kni_stats[i].rx_dropped,
kni_stats[i].tx_packets,
kni_stats[i].tx_dropped);
printf("====== ============== ============ ============ ============ ============\n");
/* Custom handling of signals to handle stats and kni processing */
/* Registered in main() for SIGUSR1 (print stats), SIGUSR2 (reset stats)
 * and SIGRTMIN (request shutdown).
 * NOTE(review): printf/memset are not async-signal-safe; tolerable for an
 * example application, but not a pattern for production handlers. */
signal_handler(int signum)
/* When we receive a USR1 signal, print stats */
if (signum == SIGUSR1) {
/* When we receive a USR2 signal, reset stats */
if (signum == SIGUSR2) {
memset(&kni_stats, 0, sizeof(kni_stats));
printf("\n**Statistics have been reset**\n");
/* When we receive a RTMIN signal, stop kni processing */
if (signum == SIGRTMIN) {
printf("SIGRTMIN is received, and the KNI processing is "
/* kni_stop is polled by the RX/TX loops in main_loop() */
rte_atomic32_inc(&kni_stop);
/* Free a burst of mbufs — used for packets that could not be passed on
 * to the KNI device or the NIC. */
kni_burst_free_mbufs(struct rte_mbuf **pkts, unsigned num)
for (i = 0; i < num; i++) {
rte_pktmbuf_free(pkts[i]);
* Interface to burst rx and enqueue mbufs into rx_q
kni_ingress(struct kni_port_params *p)
struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
port_id = p->port_id;
/* One iteration per KNI device attached to this port */
for (i = 0; i < nb_kni; i++) {
/* Burst rx from eth */
nb_rx = rte_eth_rx_burst(port_id, 0, pkts_burst, PKT_BURST_SZ);
/* Defensive check: a PMD must never return more than requested */
if (unlikely(nb_rx > PKT_BURST_SZ)) {
RTE_LOG(ERR, APP, "Error receiving from eth\n");
/* Burst tx to kni */
num = rte_kni_tx_burst(p->kni[i], pkts_burst, nb_rx);
/* rx_packets counts NIC->KNI packets accepted by the kernel side */
kni_stats[port_id].rx_packets += num;
/* Service pending control requests (MTU change, if up/down) */
rte_kni_handle_request(p->kni[i]);
if (unlikely(num < nb_rx)) {
/* Free mbufs not tx to kni interface */
kni_burst_free_mbufs(&pkts_burst[num], nb_rx - num);
kni_stats[port_id].rx_dropped += nb_rx - num;
* Interface to dequeue mbufs from tx_q and burst tx
kni_egress(struct kni_port_params *p)
struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
port_id = p->port_id;
/* One iteration per KNI device attached to this port */
for (i = 0; i < nb_kni; i++) {
/* Burst rx from kni */
num = rte_kni_rx_burst(p->kni[i], pkts_burst, PKT_BURST_SZ);
/* Defensive check: KNI must never return more than requested */
if (unlikely(num > PKT_BURST_SZ)) {
RTE_LOG(ERR, APP, "Error receiving from KNI\n");
/* Burst tx to eth */
nb_tx = rte_eth_tx_burst(port_id, 0, pkts_burst, (uint16_t)num);
/* tx_packets counts KNI->NIC packets accepted by the hardware queue */
kni_stats[port_id].tx_packets += nb_tx;
if (unlikely(nb_tx < num)) {
/* Free mbufs not tx to NIC */
kni_burst_free_mbufs(&pkts_burst[nb_tx], num - nb_tx);
kni_stats[port_id].tx_dropped += num - nb_tx;
/* Per-lcore worker launched on every lcore: runs the RX (eth->KNI) or TX
 * (KNI->eth) loop for whichever port lists this lcore in its --config
 * entry, or logs and exits if no port claims it. */
main_loop(__rte_unused void *arg)
uint8_t i, nb_ports = rte_eth_dev_count();
const unsigned lcore_id = rte_lcore_id();
enum lcore_rxtx flag = LCORE_NONE;
/* Clamp to the bounds of kni_port_params_array[] */
nb_ports = (uint8_t)(nb_ports < RTE_MAX_ETHPORTS ?
nb_ports : RTE_MAX_ETHPORTS);
for (i = 0; i < nb_ports; i++) {
if (!kni_port_params_array[i])
/* NOTE(review): lcore_rx is unsigned but lcore_id is cast to
 * uint8_t here — lcore ids >= 256 would compare incorrectly.
 * TODO confirm intent. */
if (kni_port_params_array[i]->lcore_rx == (uint8_t)lcore_id) {
} else if (kni_port_params_array[i]->lcore_tx ==
if (flag == LCORE_RX) {
RTE_LOG(INFO, APP, "Lcore %u is reading from port %d\n",
kni_port_params_array[i]->lcore_rx,
kni_port_params_array[i]->port_id);
/* Spin until SIGRTMIN bumps the kni_stop flag */
f_stop = rte_atomic32_read(&kni_stop);
kni_ingress(kni_port_params_array[i]);
} else if (flag == LCORE_TX) {
RTE_LOG(INFO, APP, "Lcore %u is writing to port %d\n",
kni_port_params_array[i]->lcore_tx,
kni_port_params_array[i]->port_id);
f_stop = rte_atomic32_read(&kni_stop);
kni_egress(kni_port_params_array[i]);
RTE_LOG(INFO, APP, "Lcore %u has nothing to do\n", lcore_id);
/* Display usage instructions */
/* prgname (argv[0]) fills the %s in the usage line. */
print_usage(const char *prgname)
RTE_LOG(INFO, APP, "\nUsage: %s [EAL options] -- -p PORTMASK -P "
"[--config (port,lcore_rx,lcore_tx,lcore_kthread...)"
"[,(port,lcore_rx,lcore_tx,lcore_kthread...)]]\n"
" -p PORTMASK: hex bitmask of ports to use\n"
" -P : enable promiscuous mode\n"
" --config (port,lcore_rx,lcore_tx,lcore_kthread...): "
"port and lcore configurations\n",
/* Convert a hex string to an unsigned number. 0 is returned if error occurs
 * (empty input, trailing garbage, or a value that does not fit in 32 bits).
 * Used for the -p port mask. */
static uint32_t
parse_unsigned(const char *portmask)
{
	char *end = NULL;
	unsigned long num;

	errno = 0;
	num = strtoul(portmask, &end, 16);
	/* BUG FIX: the original ignored ERANGE/overflow, so an oversized mask
	 * was silently truncated by the cast below instead of being rejected. */
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') ||
	    (errno == ERANGE) || (num > (unsigned long)UINT32_MAX))
		return 0;

	return (uint32_t)num;
}
/* Dump the parsed per-port configuration at DEBUG log level. */
struct kni_port_params **p = kni_port_params_array;
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
RTE_LOG(DEBUG, APP, "Port ID: %d\n", p[i]->port_id);
RTE_LOG(DEBUG, APP, "Rx lcore ID: %u, Tx lcore ID: %u\n",
p[i]->lcore_rx, p[i]->lcore_tx);
/* One line per kernel-thread lcore configured for this port */
for (j = 0; j < p[i]->nb_lcore_k; j++)
RTE_LOG(DEBUG, APP, "Kernel thread lcore ID: %u\n",
/* Parse one or more "(port,lcore_rx,lcore_tx[,lcore_kthread...])" groups
 * from the --config argument into kni_port_params_array[]. At most one
 * group per port; errors free everything allocated so far. */
parse_config(const char *arg)
const char *p, *p0 = arg;
/* Fields: port, lcore_rx, lcore_tx, then up to KNI_MAX_KTHREAD optional
 * kernel-thread lcore ids */
_NUM_FLD = KNI_MAX_KTHREAD + 3,
char *str_fld[_NUM_FLD];
unsigned long int_fld[_NUM_FLD];
uint8_t port_id, nb_kni_port_params = 0;
memset(&kni_port_params_array, 0, sizeof(kni_port_params_array));
/* Scan for parenthesised groups, bounded by the max port count */
while (((p = strchr(p0, '(')) != NULL) &&
nb_kni_port_params < RTE_MAX_ETHPORTS) {
if ((p0 = strchr(p, ')')) == NULL)
if (size >= sizeof(s)) {
printf("Invalid config parameters\n");
rte_snprintf(s, sizeof(s), "%.*s", size, p);
nb_token = rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',');
/* Need at least port, lcore_rx and lcore_tx */
if (nb_token <= FLD_LCORE_TX) {
printf("Invalid config parameters\n");
for (i = 0; i < nb_token; i++) {
int_fld[i] = strtoul(str_fld[i], &end, 0);
if (errno != 0 || end == str_fld[i]) {
printf("Invalid config parameters\n");
port_id = (uint8_t)int_fld[i++];
if (port_id >= RTE_MAX_ETHPORTS) {
printf("Port ID %d could not exceed the maximum %d\n",
port_id, RTE_MAX_ETHPORTS);
if (kni_port_params_array[port_id]) {
printf("Port %d has been configured\n", port_id);
/* NOTE(review): rte_zmalloc's result is dereferenced below without a
 * NULL check — an allocation failure would crash here. */
kni_port_params_array[port_id] =
(struct kni_port_params*)rte_zmalloc("KNI_port_params",
sizeof(struct kni_port_params), CACHE_LINE_SIZE);
kni_port_params_array[port_id]->port_id = port_id;
kni_port_params_array[port_id]->lcore_rx =
(uint8_t)int_fld[i++];
kni_port_params_array[port_id]->lcore_tx =
(uint8_t)int_fld[i++];
if (kni_port_params_array[port_id]->lcore_rx >= RTE_MAX_LCORE ||
kni_port_params_array[port_id]->lcore_tx >= RTE_MAX_LCORE) {
printf("lcore_rx %u or lcore_tx %u ID could not "
"exceed the maximum %u\n",
kni_port_params_array[port_id]->lcore_rx,
kni_port_params_array[port_id]->lcore_tx,
(unsigned)RTE_MAX_LCORE);
/* Remaining tokens are lcore ids for the KNI kernel threads */
for (j = 0; i < nb_token && j < KNI_MAX_KTHREAD; i++, j++)
kni_port_params_array[port_id]->lcore_k[j] =
kni_port_params_array[port_id]->nb_lcore_k = j;
/* Error path: release every per-port struct allocated so far */
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
if (kni_port_params_array[i]) {
rte_free(kni_port_params_array[i]);
kni_port_params_array[i] = NULL;
/* Cross-check the -p port mask against the --config data and verify that
 * every configured RX/TX lcore is enabled in the EAL core mask.
 * Exits the application on any inconsistency. */
validate_parameters(uint32_t portmask)
printf("No port configured in port mask\n");
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
/* Each bit set in the mask must have a --config entry, and vice versa.
 * NOTE(review): `1 << i` is a signed shift; for i == 31 this shifts
 * into the sign bit (UB) — should be `1u << i`. TODO confirm. */
if (((portmask & (1 << i)) && !kni_port_params_array[i]) ||
(!(portmask & (1 << i)) && kni_port_params_array[i]))
rte_exit(EXIT_FAILURE, "portmask is not consistent "
"to port ids specified in --config\n");
if (kni_port_params_array[i] && !rte_lcore_is_enabled(\
(unsigned)(kni_port_params_array[i]->lcore_rx)))
rte_exit(EXIT_FAILURE, "lcore id %u for "
"port %d receiving not enabled\n",
kni_port_params_array[i]->lcore_rx,
kni_port_params_array[i]->port_id);
if (kni_port_params_array[i] && !rte_lcore_is_enabled(\
(unsigned)(kni_port_params_array[i]->lcore_tx)))
rte_exit(EXIT_FAILURE, "lcore id %u for "
"port %d transmitting not enabled\n",
kni_port_params_array[i]->lcore_tx,
kni_port_params_array[i]->port_id);
566 #define CMDLINE_OPT_CONFIG "config"
/* Parse the arguments given in the command line of the application */
/* Handles -p (port mask), -P (promiscuous) and --config; exits via
 * rte_exit on invalid options or inconsistent parameters. */
parse_args(int argc, char **argv)
int opt, longindex, ret = 0;
const char *prgname = argv[0];
static struct option longopts[] = {
{CMDLINE_OPT_CONFIG, required_argument, NULL, 0},
/* Disable printing messages within getopt() */
/* Parse command line */
while ((opt = getopt_long(argc, argv, "p:P", longopts,
&longindex)) != EOF) {
/* -p: hex bitmask of ports to use */
ports_mask = parse_unsigned(optarg);
/* Long option (opt == 0): match against --config by name */
if (!strncmp(longopts[longindex].name,
sizeof(CMDLINE_OPT_CONFIG))) {
ret = parse_config(optarg);
printf("Invalid config\n");
print_usage(prgname);
print_usage(prgname);
rte_exit(EXIT_FAILURE, "Invalid option specified\n");
/* Check that options were parsed ok */
if (validate_parameters(ports_mask) < 0) {
print_usage(prgname);
rte_exit(EXIT_FAILURE, "Invalid parameters\n");
/* Initialise a single port on an Ethernet device */
/* Configures one RX and one TX queue, starts the port, and (when -P was
 * given) enables promiscuous mode. Exits the application on any failure. */
init_port(uint8_t port)
/* Initialise device and RX/TX queues */
RTE_LOG(INFO, APP, "Initialising port %u ...\n", (unsigned)port);
ret = rte_eth_dev_configure(port, 1, 1, &port_conf);
rte_exit(EXIT_FAILURE, "Could not configure port%u (%d)\n",
(unsigned)port, ret);
/* RX queue draws mbufs from the shared pktmbuf_pool */
ret = rte_eth_rx_queue_setup(port, 0, NB_RXD,
rte_eth_dev_socket_id(port), &rx_conf, pktmbuf_pool);
rte_exit(EXIT_FAILURE, "Could not setup up RX queue for "
"port%u (%d)\n", (unsigned)port, ret);
ret = rte_eth_tx_queue_setup(port, 0, NB_TXD,
rte_eth_dev_socket_id(port), &tx_conf);
rte_exit(EXIT_FAILURE, "Could not setup up TX queue for "
"port%u (%d)\n", (unsigned)port, ret);
ret = rte_eth_dev_start(port);
rte_exit(EXIT_FAILURE, "Could not start port%u (%d)\n",
(unsigned)port, ret);
rte_eth_promiscuous_enable(port);
654 /* Check the link status of all ports in up to 9s, and print them finally */
656 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
658 #define CHECK_INTERVAL 100 /* 100ms */
659 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
660 uint8_t portid, count, all_ports_up, print_flag = 0;
661 struct rte_eth_link link;
663 printf("\nChecking link status\n");
665 for (count = 0; count <= MAX_CHECK_TIME; count++) {
667 for (portid = 0; portid < port_num; portid++) {
668 if ((port_mask & (1 << portid)) == 0)
670 memset(&link, 0, sizeof(link));
671 rte_eth_link_get_nowait(portid, &link);
672 /* print link status if flag set */
673 if (print_flag == 1) {
674 if (link.link_status)
675 printf("Port %d Link Up - speed %u "
676 "Mbps - %s\n", (uint8_t)portid,
677 (unsigned)link.link_speed,
678 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
679 ("full-duplex") : ("half-duplex\n"));
681 printf("Port %d Link Down\n",
685 /* clear all_ports_up flag if any link down */
686 if (link.link_status == 0) {
691 /* after finally printing all link status, get out */
695 if (all_ports_up == 0) {
698 rte_delay_ms(CHECK_INTERVAL);
701 /* set the print_flag if all ports up or timeout */
702 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/* Callback for request of changing MTU */
/* Invoked by the KNI library when the kernel side changes the interface
 * MTU: stop the port, adjust max_rx_pkt_len (enabling jumbo frames when
 * the MTU exceeds a standard frame), reconfigure, and restart.
 * NOTE(review): the RX queue is not set up again after reconfigure —
 * later DPDK versions re-create it here; verify against the KNI docs. */
kni_change_mtu(uint8_t port_id, unsigned new_mtu)
struct rte_eth_conf conf;
if (port_id >= rte_eth_dev_count()) {
RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
RTE_LOG(INFO, APP, "Change MTU of port %d to %u\n", port_id, new_mtu);
/* Stop specific port */
rte_eth_dev_stop(port_id);
/* Start from the application-wide defaults, then adjust frame size */
memcpy(&conf, &port_conf, sizeof(conf));
if (new_mtu > ETHER_MAX_LEN)
conf.rxmode.jumbo_frame = 1;
conf.rxmode.jumbo_frame = 0;
/* mtu + length of header + length of FCS = max pkt length */
conf.rxmode.max_rx_pkt_len = new_mtu + KNI_ENET_HEADER_SIZE +
ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
RTE_LOG(ERR, APP, "Fail to reconfigure port %d\n", port_id);
/* Restart specific port */
ret = rte_eth_dev_start(port_id);
RTE_LOG(ERR, APP, "Fail to restart port %d\n", port_id);
/* Callback for request of configuring network interface up/down */
/* Invoked by the KNI library on `ifconfig vEthX up/down`: maps interface
 * state onto rte_eth_dev_start/stop for the backing port. */
kni_config_network_interface(uint8_t port_id, uint8_t if_up)
if (port_id >= rte_eth_dev_count() || port_id >= RTE_MAX_ETHPORTS) {
RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
RTE_LOG(INFO, APP, "Configure network interface of %d %s\n",
port_id, if_up ? "up" : "down");
if (if_up != 0) { /* Configure network interface up */
/* Stop first so start acts as a clean (re)start */
rte_eth_dev_stop(port_id);
ret = rte_eth_dev_start(port_id);
} else /* Configure network interface down */
rte_eth_dev_stop(port_id);
RTE_LOG(ERR, APP, "Failed to start port %d\n", port_id);
/* Create the KNI device(s) for one port: one per configured kernel-thread
 * lcore, or a single device by default. The first device for a port gets
 * the MTU-change and if-up/down callbacks; exits the app on failure. */
kni_alloc(uint8_t port_id)
struct rte_kni_conf conf;
struct kni_port_params **params = kni_port_params_array;
if (port_id >= RTE_MAX_ETHPORTS || !params[port_id])
/* One KNI device per kthread lcore, or a single one by default */
params[port_id]->nb_kni = params[port_id]->nb_lcore_k ?
params[port_id]->nb_lcore_k : 1;
for (i = 0; i < params[port_id]->nb_kni; i++) {
/* Clear conf at first */
memset(&conf, 0, sizeof(conf));
if (params[port_id]->nb_lcore_k) {
/* Multi-thread mode: name carries the kthread index and the
 * device is pinned to its configured lcore */
rte_snprintf(conf.name, RTE_KNI_NAMESIZE,
"vEth%u_%u", port_id, i);
conf.core_id = params[port_id]->lcore_k[i];
rte_snprintf(conf.name, RTE_KNI_NAMESIZE,
conf.group_id = (uint16_t)port_id;
conf.mbuf_size = MAX_PACKET_SZ;
* The first KNI device associated to a port
* is the master, for multiple kernel thread
struct rte_kni_ops ops;
struct rte_eth_dev_info dev_info;
memset(&dev_info, 0, sizeof(dev_info));
rte_eth_dev_info_get(port_id, &dev_info);
/* Expose the underlying PCI identity to the KNI kernel module */
conf.addr = dev_info.pci_dev->addr;
conf.id = dev_info.pci_dev->id;
memset(&ops, 0, sizeof(ops));
ops.port_id = port_id;
ops.change_mtu = kni_change_mtu;
ops.config_network_if = kni_config_network_interface;
kni = rte_kni_alloc(pktmbuf_pool, &conf, &ops);
/* Non-first devices get no ops; requests go to the master */
kni = rte_kni_alloc(pktmbuf_pool, &conf, NULL);
rte_exit(EXIT_FAILURE, "Fail to create kni for "
"port: %d\n", port_id);
params[port_id]->kni[i] = kni;
838 kni_free_kni(uint8_t port_id)
841 struct kni_port_params **p = kni_port_params_array;
843 if (port_id >= RTE_MAX_ETHPORTS || !p[port_id])
846 for (i = 0; i < p[i]->nb_kni; i++) {
847 rte_kni_release(p[i]->kni[i]);
850 rte_eth_dev_stop(port_id);
855 /* Initialise ports/queues etc. and start main loop on each core */
857 main(int argc, char** argv)
860 uint8_t nb_sys_ports, port;
863 /* Associate signal_hanlder function with USR signals */
864 signal(SIGUSR1, signal_handler);
865 signal(SIGUSR2, signal_handler);
866 signal(SIGRTMIN, signal_handler);
869 ret = rte_eal_init(argc, argv);
871 rte_exit(EXIT_FAILURE, "Could not initialise EAL (%d)\n", ret);
875 /* Parse application arguments (after the EAL ones) */
876 ret = parse_args(argc, argv);
878 rte_exit(EXIT_FAILURE, "Could not parse input parameters\n");
880 /* Create the mbuf pool */
881 pktmbuf_pool = rte_mempool_create("mbuf_pool", NB_MBUF, MBUF_SZ,
883 sizeof(struct rte_pktmbuf_pool_private),
884 rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
886 if (pktmbuf_pool == NULL) {
887 rte_exit(EXIT_FAILURE, "Could not initialise mbuf pool\n");
891 /* Initialise PMD driver(s) */
892 ret = rte_pmd_init_all();
894 rte_exit(EXIT_FAILURE, "Could not initialise PMD (%d)\n", ret);
896 /* Scan PCI bus for recognised devices */
897 ret = rte_eal_pci_probe();
899 rte_exit(EXIT_FAILURE, "Could not probe PCI (%d)\n", ret);
901 /* Get number of ports found in scan */
902 nb_sys_ports = rte_eth_dev_count();
903 if (nb_sys_ports == 0)
904 rte_exit(EXIT_FAILURE, "No supported Ethernet devices found - "
905 "check that CONFIG_RTE_LIBRTE_IGB_PMD=y and/or "
906 "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in the config file\n");
908 /* Check if the configured port ID is valid */
909 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
910 if (kni_port_params_array[i] && i >= nb_sys_ports)
911 rte_exit(EXIT_FAILURE, "Configured invalid "
914 /* Initialise each port */
915 for (port = 0; port < nb_sys_ports; port++) {
916 /* Skip ports that are not enabled */
917 if (!(ports_mask & (1 << port)))
921 if (port >= RTE_MAX_ETHPORTS)
922 rte_exit(EXIT_FAILURE, "Can not use more than "
923 "%d ports for kni\n", RTE_MAX_ETHPORTS);
927 check_all_ports_link_status(nb_sys_ports, ports_mask);
929 /* Launch per-lcore function on every lcore */
930 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
931 RTE_LCORE_FOREACH_SLAVE(i) {
932 if (rte_eal_wait_lcore(i) < 0)
936 /* Release resources */
937 for (port = 0; port < nb_sys_ports; port++) {
938 if (!(ports_mask & (1 << port)))
942 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
943 if (kni_port_params_array[i]) {
944 rte_free(kni_port_params_array[i]);
945 kni_port_params_array[i] = NULL;