/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <arpa/inet.h>
#include <getopt.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <signal.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/param.h>
#include <unistd.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_net.h>
#include <rte_vhost.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_pause.h>
#include <rte_dmadev.h>
#include <rte_vhost_async.h>

#include "main.h"

#ifndef MAX_QUEUES
#define MAX_QUEUES 128
#endif

#define NUM_MBUFS_DEFAULT 0x24000

/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

#define MBUF_CACHE_SIZE 128
#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE

#define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */

#define BURST_RX_WAIT_US 15	/* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4	/* Number of retries on RX. */

#define JUMBO_FRAME_MAX_SIZE 0x2600
#define MAX_MTU (JUMBO_FRAME_MAX_SIZE - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN))

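/*
 * For reference: JUMBO_FRAME_MAX_SIZE is 0x2600 = 9728 bytes, and the
 * Ethernet header (14 B) plus CRC (4 B) is 18 B, so MAX_MTU evaluates
 * to 9710.
 */
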
/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
#define DEVICE_RX 1
#define DEVICE_SAFE_REMOVE 2

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF
#define INVALID_DMA_ID -1

#define DMA_RING_SIZE 4096
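
/*
 * 4096 is an upper bound: open_dma() below clamps the vchan ring to the
 * device's reported max_desc via RTE_MIN().
 */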

/* number of mbufs in all pools - if specified on command-line. */
static int total_num_mbufs = NUM_MBUFS_DEFAULT;

struct dma_for_vhost dma_bind[RTE_MAX_VHOST_DEVICE];
int16_t dmas_id[RTE_DMADEV_DEFAULT_MAX];
static int dma_count;

/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Promiscuous mode */
static uint32_t promiscuous;

/* number of devices/queues to support */
static uint32_t num_queues = 0;
static uint32_t num_devices;

static struct rte_mempool *mbuf_pool;
static int mergeable;

/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
typedef enum {
	VM2VM_DISABLED = 0,
	VM2VM_SOFTWARE = 1,
	VM2VM_HARDWARE = 2,
	VM2VM_LAST
} vm2vm_type;
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;

/* Enable stats. */
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;

/* Disable TX checksum offload */
static uint32_t enable_tx_csum;

/* Disable TSO offload */
static uint32_t enable_tso;

static int client_mode;

static int builtin_net_driver;

/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
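
/*
 * With the defaults above, a full Rx retry cycle waits at most
 * BURST_RX_RETRIES * BURST_RX_WAIT_US = 4 * 15 = 60 us before giving up
 * and dropping the burst (see drain_eth_rx() below).
 */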

/* Socket file paths. Can be set by user */
static char *socket_files;
static int nb_sockets;

/* empty VMDq configuration structure. Filled in programmatically */
static struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY,
		/*
		 * VLAN strip is necessary for 1G NICs such as the I350;
		 * it fixes a bug where IPv4 forwarding in the guest could
		 * not forward packets from one virtio dev to another.
		 */
		.offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
	},

	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
		.offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
			     RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
			     RTE_ETH_TX_OFFLOAD_TCP_TSO),
	},

	.rx_adv_conf = {
		/*
		 * should be overridden separately in code with
		 * appropriate values
		 */
		.vmdq_rx_conf = {
			.nb_queue_pools = RTE_ETH_8_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
};

static unsigned lcore_ids[RTE_MAX_LCORE];
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified in command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;

const uint16_t vlan_tags[] = {
	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
	1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};

/* ethernet addresses of ports */
static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

static struct vhost_dev_tailq_list vhost_dev_list =
	TAILQ_HEAD_INITIALIZER(vhost_dev_list);

static struct lcore_info lcore_info[RTE_MAX_LCORE];

/* Used for queueing bursts of TX packets. */
struct mbuf_table {
	unsigned len;
	unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

struct vhost_bufftable {
	uint32_t len;
	uint64_t pre_tsc;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];

/*
 * Vhost TX buffer for each data core.
 * Every data core maintains a TX buffer for every vhost device,
 * which is used for batch pkts enqueue for higher performance.
 */
struct vhost_bufftable *vhost_txbuff[RTE_MAX_LCORE * RTE_MAX_VHOST_DEVICE];

#define MBUF_TABLE_DRAIN_TSC ((rte_get_tsc_hz() + US_PER_S - 1) \
				 / US_PER_S * BURST_TX_DRAIN_US)
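
/*
 * Worked example: with a 2.5 GHz TSC, (2.5e9 + 1e6 - 1) / 1e6 rounds up
 * to 2500 cycles per microsecond, so the drain threshold is
 * 2500 * 100 = 250000 TSC cycles (~100 us).
 */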

static inline bool
is_dma_configured(int16_t dev_id)
{
	int i;

	for (i = 0; i < dma_count; i++)
		if (dmas_id[i] == dev_id)
			return true;
	return false;
}

static int
open_dma(const char *value)
{
	struct dma_for_vhost *dma_info = dma_bind;
	char *input = strndup(value, strlen(value) + 1);
	char *addrs = input;
	char *ptrs[2];
	char *start, *end, *substr;
	int64_t vid;

	struct rte_dma_info info;
	struct rte_dma_conf dev_config = { .nb_vchans = 1 };
	struct rte_dma_vchan_conf qconf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = DMA_RING_SIZE
	};

	int dev_id;
	int ret = 0;
	uint16_t i = 0;
	char *dma_arg[RTE_MAX_VHOST_DEVICE];
	int args_nr;

	while (isblank(*addrs))
		addrs++;
	if (*addrs == '\0') {
		ret = -1;
		goto out;
	}

	/* process DMA devices within bracket. */
	addrs++;
	substr = strtok(addrs, ";]");
	if (!substr) {
		ret = -1;
		goto out;
	}

	args_nr = rte_strsplit(substr, strlen(substr), dma_arg, RTE_MAX_VHOST_DEVICE, ',');
	if (args_nr <= 0) {
		ret = -1;
		goto out;
	}

	while (i < args_nr) {
		char *arg_temp = dma_arg[i];
		uint8_t sub_nr;

		sub_nr = rte_strsplit(arg_temp, strlen(arg_temp), ptrs, 2, '@');
		if (sub_nr != 2) {
			ret = -1;
			goto out;
		}

		start = strstr(ptrs[0], "txd");
		if (start == NULL) {
			ret = -1;
			goto out;
		}

		start += 3;
		vid = strtol(start, &end, 0);
		if (end == start) {
			ret = -1;
			goto out;
		}

		dev_id = rte_dma_get_dev_id_by_name(ptrs[1]);
		if (dev_id < 0) {
			RTE_LOG(ERR, VHOST_CONFIG, "Fail to find DMA %s.\n", ptrs[1]);
			ret = -1;
			goto out;
		}

		/* DMA device is already configured, so skip */
		if (is_dma_configured(dev_id))
			goto done;

		if (rte_dma_info_get(dev_id, &info) != 0) {
			RTE_LOG(ERR, VHOST_CONFIG, "Error with rte_dma_info_get()\n");
			ret = -1;
			goto out;
		}

		if (info.max_vchans < 1) {
			RTE_LOG(ERR, VHOST_CONFIG, "No channels available on device %d\n", dev_id);
			ret = -1;
			goto out;
		}

		if (rte_dma_configure(dev_id, &dev_config) != 0) {
			RTE_LOG(ERR, VHOST_CONFIG, "Fail to configure DMA %d.\n", dev_id);
			ret = -1;
			goto out;
		}

		/* Check the max desc supported by DMA device */
		rte_dma_info_get(dev_id, &info);
		if (info.nb_vchans != 1) {
			RTE_LOG(ERR, VHOST_CONFIG, "No configured queues reported by DMA %d.\n",
				dev_id);
			ret = -1;
			goto out;
		}

		qconf.nb_desc = RTE_MIN(DMA_RING_SIZE, info.max_desc);

		if (rte_dma_vchan_setup(dev_id, 0, &qconf) != 0) {
			RTE_LOG(ERR, VHOST_CONFIG, "Fail to set up DMA %d.\n", dev_id);
			ret = -1;
			goto out;
		}

		if (rte_dma_start(dev_id) != 0) {
			RTE_LOG(ERR, VHOST_CONFIG, "Fail to start DMA %u.\n", dev_id);
			ret = -1;
			goto out;
		}

		dmas_id[dma_count++] = dev_id;

done:
		(dma_info + vid)->dmas[VIRTIO_RXQ].dev_id = dev_id;
		i++;
	}
out:
	free(input);
	return ret;
}
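
/*
 * For reference, the value parsed above has the form
 * "[txd<vid>@<DMA device name>,...]": e.g. --dmas [txd0@0000:00:04.0]
 * binds the DMA device named "0000:00:04.0" (whatever name
 * rte_dma_get_dev_id_by_name() resolves) to the enqueue path of vhost
 * device 0.
 */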

/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
	struct rte_eth_vmdq_rx_conf conf;
	struct rte_eth_vmdq_rx_conf *def_conf =
		&vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
	unsigned i;

	memset(&conf, 0, sizeof(conf));
	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
	conf.nb_pool_maps = num_devices;
	conf.enable_loop_back = def_conf->enable_loop_back;
	conf.rx_mode = def_conf->rx_mode;

	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << i);
	}

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
	return 0;
}
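
/*
 * Example: with num_devices = 8, pool i (0..7) receives frames tagged
 * with vlan_tags[i] (1000..1007), selected through the one-hot mask
 * (1UL << i) written to pool_map[i].pools.
 */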

/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
static inline int
port_init(uint16_t port)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_txconf *txconf;
	int16_t rx_rings, tx_rings;
	uint16_t rx_ring_size, tx_ring_size;
	int retval;
	uint16_t q;

	/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		RTE_LOG(ERR, VHOST_PORT,
			"Error during getting device (port %u) info: %s\n",
			port, strerror(-retval));

		return retval;
	}

	rxconf = &dev_info.default_rxconf;
	txconf = &dev_info.default_txconf;
	rxconf->rx_drop_en = 1;

	/* Configure the number of supported virtio devices based on VMDQ limits */
	num_devices = dev_info.max_vmdq_pools;

	rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
	tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;

	tx_rings = (uint16_t)rte_lcore_count();

	if (dev_info.max_mtu != UINT16_MAX && dev_info.max_rx_pktlen > dev_info.max_mtu)
		vmdq_conf_default.rxmode.mtu = dev_info.max_mtu;
	else
		vmdq_conf_default.rxmode.mtu = MAX_MTU;

	/* Get port configuration. */
	retval = get_eth_conf(&port_conf, num_devices);
	if (retval < 0)
		return retval;
	/* NIC queues are divided into pf queues and vmdq queues. */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_devices * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base = dev_info.vmdq_pool_base;
	printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
		num_pf_queues, num_devices, queues_per_pool);

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	rx_rings = (uint16_t)dev_info.max_rx_queues;
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
			port, strerror(-retval));
		return retval;
	}

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
		&tx_ring_size);
	if (retval != 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
			"for port %u: %s.\n", port, strerror(-retval));
		return retval;
	}
	if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
		RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
			"for Rx queues on port %u.\n", port);
		return -1;
	}

	/* Setup the queues. */
	rxconf->offloads = port_conf.rxmode.offloads;
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
						rte_eth_dev_socket_id(port),
						rxconf,
						mbuf_pool);
		if (retval < 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup rx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));
			return retval;
		}
	}
	txconf->offloads = port_conf.txmode.offloads;
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
						rte_eth_dev_socket_id(port),
						txconf);
		if (retval < 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup tx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));
			return retval;
		}
	}

	/* Start the device. */
	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
			port, strerror(-retval));
		return retval;
	}

	if (promiscuous) {
		retval = rte_eth_promiscuous_enable(port);
		if (retval != 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to enable promiscuous mode on port %u: %s\n",
				port, rte_strerror(-retval));
			return retval;
		}
	}

	retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	if (retval < 0) {
		RTE_LOG(ERR, VHOST_PORT,
			"Failed to get MAC address on port %u: %s\n",
			port, rte_strerror(-retval));
		return retval;
	}

	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
		" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
		port, RTE_ETHER_ADDR_BYTES(&vmdq_ports_eth_addr[port]));

	return 0;
}

/*
 * Set socket file path.
 */
static int
us_vhost_parse_socket_path(const char *q_arg)
{
	char *old;

	/* reject paths that would not fit in a PATH_MAX slot */
	if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
		return -1;

	old = socket_files;
	socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
	if (socket_files == NULL) {
		free(old);
		return -1;
	}

	strlcpy(socket_files + nb_sockets * PATH_MAX, q_arg, PATH_MAX);
	nb_sockets++;

	return 0;
}
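
/*
 * socket_files is a flat array of fixed-size PATH_MAX slots: entry i
 * lives at socket_files + i * PATH_MAX. unregister_drivers() and the
 * registration loop in main() index it the same way.
 */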

/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	errno = 0;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return 0;

	return pm;
}

/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
	char *end = NULL;
	unsigned long num;

	errno = 0;

	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	if (num > max_valid_value)
		return -1;

	return num;
}

/*
 * Display usage
 */
static void
us_vhost_usage(const char *prgname)
{
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
	"		--vm2vm [0|1|2]\n"
	"		--rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
	"		--socket-file <path>\n"
	"		--nb-devices ND\n"
	"		-p PORTMASK: Set mask for ports to be used by application\n"
	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
	"		--rx-retry [0|1]: disable/enable(default) retries on Rx. Enable retry if destination queue is full\n"
	"		--rx-retry-delay [0-N]: timeout (in microseconds) between retries on Rx. Only takes effect if Rx retries are enabled\n"
	"		--rx-retry-num [0-N]: the number of retries on Rx. Only takes effect if Rx retries are enabled\n"
	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
	"		--socket-file: The path of the socket file.\n"
	"		--tx-csum [0|1] disable/enable TX checksum offload.\n"
	"		--tso [0|1] disable/enable TCP segmentation offload (TSO).\n"
	"		--client register a vhost-user socket as client mode.\n"
	"		--dmas register dma channel for specific vhost device.\n"
	"		--total-num-mbufs [0-N] set the number of mbufs to be allocated in mbuf pools, the default value is 147456.\n",
	       prgname);
}

enum {
#define OPT_VM2VM "vm2vm"
	OPT_VM2VM_NUM = 256,
#define OPT_RX_RETRY "rx-retry"
	OPT_RX_RETRY_NUM,
#define OPT_RX_RETRY_DELAY "rx-retry-delay"
	OPT_RX_RETRY_DELAY_NUM,
#define OPT_RX_RETRY_NUMB "rx-retry-num"
	OPT_RX_RETRY_NUMB_NUM,
#define OPT_MERGEABLE "mergeable"
	OPT_MERGEABLE_NUM,
#define OPT_STATS "stats"
	OPT_STATS_NUM,
#define OPT_SOCKET_FILE "socket-file"
	OPT_SOCKET_FILE_NUM,
#define OPT_TX_CSUM "tx-csum"
	OPT_TX_CSUM_NUM,
#define OPT_TSO "tso"
	OPT_TSO_NUM,
#define OPT_CLIENT "client"
	OPT_CLIENT_NUM,
#define OPT_BUILTIN_NET_DRIVER "builtin-net-driver"
	OPT_BUILTIN_NET_DRIVER_NUM,
#define OPT_DMAS "dmas"
	OPT_DMAS_NUM,
#define OPT_NUM_MBUFS "total-num-mbufs"
	OPT_NUM_MBUFS_NUM,
};

/*
 * Parse the arguments given in the command line of the application.
 */
static int
us_vhost_parse_args(int argc, char **argv)
{
	int opt, ret;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{OPT_VM2VM, required_argument,
				NULL, OPT_VM2VM_NUM},
		{OPT_RX_RETRY, required_argument,
				NULL, OPT_RX_RETRY_NUM},
		{OPT_RX_RETRY_DELAY, required_argument,
				NULL, OPT_RX_RETRY_DELAY_NUM},
		{OPT_RX_RETRY_NUMB, required_argument,
				NULL, OPT_RX_RETRY_NUMB_NUM},
		{OPT_MERGEABLE, required_argument,
				NULL, OPT_MERGEABLE_NUM},
		{OPT_STATS, required_argument,
				NULL, OPT_STATS_NUM},
		{OPT_SOCKET_FILE, required_argument,
				NULL, OPT_SOCKET_FILE_NUM},
		{OPT_TX_CSUM, required_argument,
				NULL, OPT_TX_CSUM_NUM},
		{OPT_TSO, required_argument,
				NULL, OPT_TSO_NUM},
		{OPT_CLIENT, no_argument,
				NULL, OPT_CLIENT_NUM},
		{OPT_BUILTIN_NET_DRIVER, no_argument,
				NULL, OPT_BUILTIN_NET_DRIVER_NUM},
		{OPT_DMAS, required_argument,
				NULL, OPT_DMAS_NUM},
		{OPT_NUM_MBUFS, required_argument,
				NULL, OPT_NUM_MBUFS_NUM},
		{NULL, 0, 0, 0},
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:P",
			long_option, &option_index)) != EOF) {
		switch (opt) {
		/* Portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
				us_vhost_usage(prgname);
				return -1;
			}
			break;

		case 'P':
			promiscuous = 1;
			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
				RTE_ETH_VMDQ_ACCEPT_BROADCAST |
				RTE_ETH_VMDQ_ACCEPT_MULTICAST;
			break;

		case OPT_VM2VM_NUM:
			ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for "
					"vm2vm [0|1|2]\n");
				us_vhost_usage(prgname);
				return -1;
			}
			vm2vm_mode = (vm2vm_type)ret;
			break;

		case OPT_RX_RETRY_NUM:
			ret = parse_num_opt(optarg, 1);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
				us_vhost_usage(prgname);
				return -1;
			}
			enable_retry = ret;
			break;

		case OPT_TX_CSUM_NUM:
			ret = parse_num_opt(optarg, 1);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
				us_vhost_usage(prgname);
				return -1;
			}
			enable_tx_csum = ret;
			break;

		case OPT_TSO_NUM:
			ret = parse_num_opt(optarg, 1);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
				us_vhost_usage(prgname);
				return -1;
			}
			enable_tso = ret;
			break;

		case OPT_RX_RETRY_DELAY_NUM:
			ret = parse_num_opt(optarg, INT32_MAX);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
				us_vhost_usage(prgname);
				return -1;
			}
			burst_rx_delay_time = ret;
			break;

		case OPT_RX_RETRY_NUMB_NUM:
			ret = parse_num_opt(optarg, INT32_MAX);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
				us_vhost_usage(prgname);
				return -1;
			}
			burst_rx_retry_num = ret;
			break;

		case OPT_MERGEABLE_NUM:
			ret = parse_num_opt(optarg, 1);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
				us_vhost_usage(prgname);
				return -1;
			}
			mergeable = !!ret;
			break;

		case OPT_STATS_NUM:
			ret = parse_num_opt(optarg, INT32_MAX);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for stats [0..N]\n");
				us_vhost_usage(prgname);
				return -1;
			}
			enable_stats = ret;
			break;

		/* Set socket file path. */
		case OPT_SOCKET_FILE_NUM:
			if (us_vhost_parse_socket_path(optarg) == -1) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for socket name (Max %d characters)\n",
					PATH_MAX);
				us_vhost_usage(prgname);
				return -1;
			}
			break;

		case OPT_DMAS_NUM:
			if (open_dma(optarg) == -1) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Wrong DMA args\n");
				us_vhost_usage(prgname);
				return -1;
			}
			break;

		case OPT_NUM_MBUFS_NUM:
			ret = parse_num_opt(optarg, INT32_MAX);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for total-num-mbufs [0..N]\n");
				us_vhost_usage(prgname);
				return -1;
			}

			if (total_num_mbufs < ret)
				total_num_mbufs = ret;
			break;

		case OPT_CLIENT_NUM:
			client_mode = 1;
			break;

		case OPT_BUILTIN_NET_DRIVER_NUM:
			builtin_net_driver = 1;
			break;

		/* Invalid option - print options. */
		default:
			us_vhost_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = i;
	}

	if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	return 0;
}

/*
 * Update the global var NUM_PORTS and array PORTS according to system ports number
 * and return valid ports number
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (!rte_eth_dev_is_valid_port(ports[portid])) {
			RTE_LOG(INFO, VHOST_PORT,
				"\nSpecified port ID(%u) is not valid\n",
				ports[portid]);
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}

static __rte_always_inline struct vhost_dev *
find_vhost_dev(struct rte_ether_addr *mac)
{
	struct vhost_dev *vdev;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->ready == DEVICE_RX &&
		    rte_is_same_ether_addr(mac, &vdev->mac_address))
			return vdev;
	}

	return NULL;
}

/*
 * This function learns the MAC address of the device and registers this along with a
 * vlan tag to a VMDQ.
 */
static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct rte_ether_hdr *pkt_hdr;
	int i, ret;

	/* Learn MAC address of guest device from packet */
	pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	if (find_vhost_dev(&pkt_hdr->src_addr)) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) device is using a registered MAC!\n",
			vdev->vid);
		return -1;
	}

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		vdev->mac_address.addr_bytes[i] =
			pkt_hdr->src_addr.addr_bytes[i];

	/* vlan_tag currently uses the device_id. */
	vdev->vlan_tag = vlan_tags[vdev->vid];

	/* Print out VMDQ registration info. */
	RTE_LOG(INFO, VHOST_DATA,
		"(%d) mac " RTE_ETHER_ADDR_PRT_FMT " and vlan %d registered\n",
		vdev->vid, RTE_ETHER_ADDR_BYTES(&vdev->mac_address),
		vdev->vlan_tag);

	/* Register the MAC address. */
	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
				(uint32_t)vdev->vid + vmdq_pool_base);
	if (ret)
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) failed to add device MAC address to VMDQ\n",
			vdev->vid);

	rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);

	/* Set device as ready for RX. */
	vdev->ready = DEVICE_RX;

	return 0;
}

/*
 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
 * queue before disabling RX on the device.
 */
static inline void
unlink_vmdq(struct vhost_dev *vdev)
{
	unsigned i = 0;
	unsigned rx_count;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	if (vdev->ready == DEVICE_RX) {
		/* clear MAC and VLAN settings */
		rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
		for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
			vdev->mac_address.addr_bytes[i] = 0;

		vdev->vlan_tag = 0;

		/* Clear out the receive buffers */
		rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

		while (rx_count) {
			for (i = 0; i < rx_count; i++)
				rte_pktmbuf_free(pkts_burst[i]);

			rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
		}

		vdev->ready = DEVICE_MAC_LEARNING;
	}
}

static inline void
free_pkts(struct rte_mbuf **pkts, uint16_t n)
{
	while (n--)
		rte_pktmbuf_free(pkts[n]);
}

static __rte_always_inline void
complete_async_pkts(struct vhost_dev *vdev)
{
	struct rte_mbuf *p_cpl[MAX_PKT_BURST];
	uint16_t complete_count;
	int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;

	complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
					VIRTIO_RXQ, p_cpl, MAX_PKT_BURST, dma_id, 0);
	if (complete_count) {
		free_pkts(p_cpl, complete_count);
		__atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
	}
}
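
/*
 * pkts_inflight counts packets whose DMA copies have been submitted but
 * not yet reaped; it must drain to zero before a device or vring can be
 * torn down (see destroy_device() and vring_state_changed() below).
 */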

static __rte_always_inline void
sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
	    struct rte_mbuf *m)
{
	uint16_t ret;

	if (builtin_net_driver) {
		ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
	} else {
		ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
	}

	if (enable_stats) {
		__atomic_add_fetch(&dst_vdev->stats.rx_total_atomic, 1,
				__ATOMIC_SEQ_CST);
		__atomic_add_fetch(&dst_vdev->stats.rx_atomic, ret,
				__ATOMIC_SEQ_CST);
		src_vdev->stats.tx_total++;
		src_vdev->stats.tx += ret;
	}
}

static __rte_always_inline void
drain_vhost(struct vhost_dev *vdev)
{
	uint16_t ret;
	uint32_t buff_idx = rte_lcore_id() * RTE_MAX_VHOST_DEVICE + vdev->vid;
	uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
	struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;

	if (builtin_net_driver) {
		ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
	} else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) {
		uint16_t enqueue_fail = 0;
		int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;

		complete_async_pkts(vdev);
		ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit, dma_id, 0);
		__atomic_add_fetch(&vdev->pkts_inflight, ret, __ATOMIC_SEQ_CST);

		enqueue_fail = nr_xmit - ret;
		if (enqueue_fail)
			free_pkts(&m[ret], nr_xmit - ret);
	} else {
		ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
						m, nr_xmit);
	}

	if (enable_stats) {
		__atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
				__ATOMIC_SEQ_CST);
		__atomic_add_fetch(&vdev->stats.rx_atomic, ret,
				__ATOMIC_SEQ_CST);
	}

	if (!dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled)
		free_pkts(m, nr_xmit);
}

static __rte_always_inline void
drain_vhost_table(void)
{
	uint16_t lcore_id = rte_lcore_id();
	struct vhost_bufftable *vhost_txq;
	struct vhost_dev *vdev;
	uint64_t cur_tsc;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (unlikely(vdev->remove == 1))
			continue;

		vhost_txq = vhost_txbuff[lcore_id * RTE_MAX_VHOST_DEVICE + vdev->vid];

		cur_tsc = rte_rdtsc();
		if (unlikely(cur_tsc - vhost_txq->pre_tsc
				> MBUF_TABLE_DRAIN_TSC)) {
			RTE_LOG_DP(DEBUG, VHOST_DATA,
				"Vhost TX queue drained after timeout with burst size %u\n",
				vhost_txq->len);
			drain_vhost(vdev);
			vhost_txq->len = 0;
			vhost_txq->pre_tsc = cur_tsc;
		}
	}
}

/*
 * Check if the packet destination MAC address is for a local device. If so then put
 * the packet on that devices RX queue. If not then return.
 */
static __rte_always_inline int
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct rte_ether_hdr *pkt_hdr;
	struct vhost_dev *dst_vdev;
	struct vhost_bufftable *vhost_txq;
	uint16_t lcore_id = rte_lcore_id();
	pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->dst_addr);
	if (!dst_vdev)
		return -1;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
			vdev->vid);
		return 0;
	}

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is local\n", dst_vdev->vid);

	if (unlikely(dst_vdev->remove)) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) device is marked for removal\n", dst_vdev->vid);
		return 0;
	}

	vhost_txq = vhost_txbuff[lcore_id * RTE_MAX_VHOST_DEVICE + dst_vdev->vid];
	vhost_txq->m_table[vhost_txq->len++] = m;

	if (enable_stats) {
		vdev->stats.tx_total++;
		vdev->stats.tx++;
	}

	if (unlikely(vhost_txq->len == MAX_PKT_BURST)) {
		drain_vhost(dst_vdev);
		vhost_txq->len = 0;
		vhost_txq->pre_tsc = rte_rdtsc();
	}
	return 0;
}

/*
 * Check if the destination MAC of a packet is one local VM,
 * and get its vlan tag, and offset if it is.
 */
static __rte_always_inline int
find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
	uint32_t *offset, uint16_t *vlan_tag)
{
	struct vhost_dev *dst_vdev;
	struct rte_ether_hdr *pkt_hdr =
		rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->dst_addr);
	if (!dst_vdev)
		return 0;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
			vdev->vid);
		return -1;
	}

	/*
	 * HW VLAN strip shortened the packet by the VLAN tag length,
	 * so restore the packet length by adding it back.
	 */
	*offset = RTE_VLAN_HLEN;
	*vlan_tag = vlan_tags[vdev->vid];

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
		vdev->vid, dst_vdev->vid, *vlan_tag);

	return 0;
}

static void virtio_tx_offload(struct rte_mbuf *m)
{
	struct rte_net_hdr_lens hdr_lens;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	uint32_t ptype;
	void *l3_hdr;

	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
	m->l2_len = hdr_lens.l2_len;
	m->l3_len = hdr_lens.l3_len;
	m->l4_len = hdr_lens.l4_len;

	l3_hdr = rte_pktmbuf_mtod_offset(m, void *, m->l2_len);
	tcp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
		m->l2_len + m->l3_len);

	m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
	if ((ptype & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) {
		m->ol_flags |= RTE_MBUF_F_TX_IPV4;
		m->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
		ipv4_hdr = l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = rte_ipv4_phdr_cksum(l3_hdr, m->ol_flags);
	} else { /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
		m->ol_flags |= RTE_MBUF_F_TX_IPV6;
		tcp_hdr->cksum = rte_ipv6_phdr_cksum(l3_hdr, m->ol_flags);
	}
}
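
/*
 * rte_ipv4_phdr_cksum()/rte_ipv6_phdr_cksum() seed the TCP checksum
 * field with the pseudo-header checksum, which is what NICs performing
 * TSO/TCP checksum offload expect to find there.
 */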

static __rte_always_inline void
do_drain_mbuf_table(struct mbuf_table *tx_q)
{
	uint16_t count;

	count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
				 tx_q->m_table, tx_q->len);
	if (unlikely(count < tx_q->len))
		free_pkts(&tx_q->m_table[count], tx_q->len - count);

	tx_q->len = 0;
}

/*
 * This function routes the TX packet to the correct interface. This
 * may be a local device or the physical port.
 */
static __rte_always_inline void
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
	struct mbuf_table *tx_q;
	unsigned offset = 0;
	const uint16_t lcore_id = rte_lcore_id();
	struct rte_ether_hdr *nh;

	nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	if (unlikely(rte_is_broadcast_ether_addr(&nh->dst_addr))) {
		struct vhost_dev *vdev2;

		TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
			if (vdev2 != vdev)
				sync_virtio_xmit(vdev2, vdev, m);
		}
		goto queue2nic;
	}

	/* check if destination is local VM */
	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0))
		return;

	if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
		if (unlikely(find_local_dest(vdev, m, &offset,
					     &vlan_tag) != 0)) {
			rte_pktmbuf_free(m);
			return;
		}
	}

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is external\n", vdev->vid);

queue2nic:

	/* Add packet to the port tx queue */
	tx_q = &lcore_tx_queue[lcore_id];

	nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	if (unlikely(nh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))) {
		/* Guest has inserted the vlan tag. */
		struct rte_vlan_hdr *vh = (struct rte_vlan_hdr *) (nh + 1);
		uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
		if ((vm2vm_mode == VM2VM_HARDWARE) &&
			(vh->vlan_tci != vlan_tag_be))
			vh->vlan_tci = vlan_tag_be;
	} else {
		m->ol_flags |= RTE_MBUF_F_TX_VLAN;

		/*
		 * Find the right seg to adjust the data len when offset is
		 * bigger than tail room size.
		 */
		if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
			if (likely(offset <= rte_pktmbuf_tailroom(m)))
				m->data_len += offset;
			else {
				struct rte_mbuf *seg = m;

				while ((seg->next != NULL) &&
					(offset > rte_pktmbuf_tailroom(seg)))
					seg = seg->next;

				seg->data_len += offset;
			}
			m->pkt_len += offset;
		}

		m->vlan_tci = vlan_tag;
	}

	if (m->ol_flags & RTE_MBUF_F_RX_LRO)
		virtio_tx_offload(m);

	tx_q->m_table[tx_q->len++] = m;
	if (enable_stats) {
		vdev->stats.tx_total++;
		vdev->stats.tx++;
	}

	if (unlikely(tx_q->len == MAX_PKT_BURST))
		do_drain_mbuf_table(tx_q);
}

static __rte_always_inline void
drain_mbuf_table(struct mbuf_table *tx_q)
{
	static uint64_t prev_tsc;
	uint64_t cur_tsc;

	if (tx_q->len == 0)
		return;

	cur_tsc = rte_rdtsc();
	if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
		prev_tsc = cur_tsc;

		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"TX queue drained after timeout with burst size %u\n",
			tx_q->len);
		do_drain_mbuf_table(tx_q);
	}
}

static __rte_always_inline void
drain_eth_rx(struct vhost_dev *vdev)
{
	uint16_t rx_count, enqueue_count;
	struct rte_mbuf *pkts[MAX_PKT_BURST];

	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
				    pkts, MAX_PKT_BURST);

	if (!rx_count)
		return;

	/*
	 * When "enable_retry" is set, here we wait and retry when there
	 * are not enough free slots in the queue to hold @rx_count
	 * packets, to diminish packet loss.
	 */
	if (enable_retry &&
	    unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
			VIRTIO_RXQ))) {
		uint32_t retry;

		for (retry = 0; retry < burst_rx_retry_num; retry++) {
			rte_delay_us(burst_rx_delay_time);
			if (rx_count <= rte_vhost_avail_entries(vdev->vid,
					VIRTIO_RXQ))
				break;
		}
	}

	if (builtin_net_driver) {
		enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
						pkts, rx_count);
	} else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) {
		uint16_t enqueue_fail = 0;
		int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;

		complete_async_pkts(vdev);
		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
					VIRTIO_RXQ, pkts, rx_count, dma_id, 0);
		__atomic_add_fetch(&vdev->pkts_inflight, enqueue_count, __ATOMIC_SEQ_CST);

		enqueue_fail = rx_count - enqueue_count;
		if (enqueue_fail)
			free_pkts(&pkts[enqueue_count], enqueue_fail);
	} else {
		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
						pkts, rx_count);
	}

	if (enable_stats) {
		__atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
				__ATOMIC_SEQ_CST);
		__atomic_add_fetch(&vdev->stats.rx_atomic, enqueue_count,
				__ATOMIC_SEQ_CST);
	}

	if (!dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled)
		free_pkts(pkts, rx_count);
}
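
/*
 * In the sync paths above the enqueue copies packets into the vring, so
 * the mbufs can be freed right away; in the async path the DMA engine
 * still owns them until complete_async_pkts() reaps the completions.
 */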

static __rte_always_inline void
drain_virtio_tx(struct vhost_dev *vdev)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t count;
	uint16_t i;

	if (builtin_net_driver) {
		count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
					pkts, MAX_PKT_BURST);
	} else {
		count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
					mbuf_pool, pkts, MAX_PKT_BURST);
	}

	/* setup VMDq for the first packet */
	if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
		if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
			free_pkts(pkts, count);
	}

	for (i = 0; i < count; ++i)
		virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}

/*
 * Main function of vhost-switch. It basically does:
 *
 * for each vhost device {
 *    - drain_eth_rx()
 *
 *      Which drains the host eth Rx queue linked to the vhost device,
 *      and delivers all of those packets to the guest virtio Rx ring
 *      associated with this vhost device.
 *
 *    - drain_virtio_tx()
 *
 *      Which drains the guest virtio Tx queue and delivers all of the
 *      packets to the target, which could be another vhost device, or
 *      the physical eth dev. The route is done in function "virtio_tx_route".
 * }
 */
static int
switch_worker(void *arg __rte_unused)
{
	unsigned i;
	unsigned lcore_id = rte_lcore_id();
	struct vhost_dev *vdev;
	struct mbuf_table *tx_q;

	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);

	tx_q = &lcore_tx_queue[lcore_id];
	for (i = 0; i < rte_lcore_count(); i++) {
		if (lcore_ids[i] == lcore_id) {
			tx_q->txq_id = i;
			break;
		}
	}

	while (1) {
		drain_mbuf_table(tx_q);
		drain_vhost_table();
		/*
		 * Inform the configuration core that we have exited the
		 * linked list and that no devices are in use if requested.
		 */
		if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
			lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;

		/*
		 * Process vhost devices
		 */
		TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
			      lcore_vdev_entry) {
			if (unlikely(vdev->remove)) {
				unlink_vmdq(vdev);
				vdev->ready = DEVICE_SAFE_REMOVE;
				continue;
			}

			if (likely(vdev->ready == DEVICE_RX))
				drain_eth_rx(vdev);

			if (likely(!vdev->remove))
				drain_virtio_tx(vdev);
		}
	}

	return 0;
}

/*
 * Remove a device from the specific data core linked list and from the
 * main linked list. Synchronization occurs through the use of the
 * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
 * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
 */
static void
destroy_device(int vid)
{
	struct vhost_dev *vdev = NULL;
	int lcore;
	uint16_t i;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->vid == vid)
			break;
	}
	if (!vdev)
		return;
	/* set the remove flag. */
	vdev->remove = 1;
	while (vdev->ready != DEVICE_SAFE_REMOVE) {
		rte_pause();
	}

	for (i = 0; i < RTE_MAX_LCORE; i++)
		rte_free(vhost_txbuff[i * RTE_MAX_VHOST_DEVICE + vid]);

	if (builtin_net_driver)
		vs_vhost_net_remove(vdev);

	TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
		     lcore_vdev_entry);
	TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);

	/* Set the dev_removal_flag on each lcore. */
	RTE_LCORE_FOREACH_WORKER(lcore)
		lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;

	/*
	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
	 * we can be sure that they can no longer access the device removed
	 * from the linked lists and that the devices are no longer in use.
	 */
	RTE_LCORE_FOREACH_WORKER(lcore) {
		while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
			rte_pause();
	}

	lcore_info[vdev->coreid].device_num--;

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been removed from data core\n",
		vdev->vid);

	if (dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled) {
		uint16_t n_pkt = 0;
		int16_t dma_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;
		struct rte_mbuf *m_cpl[vdev->pkts_inflight];

		while (vdev->pkts_inflight) {
			n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, VIRTIO_RXQ,
						m_cpl, vdev->pkts_inflight, dma_id, 0);
			free_pkts(m_cpl, n_pkt);
			__atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
		}

		rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
		dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = false;
	}

	rte_free(vdev);
}
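
/*
 * The pkts_inflight drain above is required before
 * rte_vhost_async_channel_unregister(): the unregister call fails while
 * in-flight DMA copies still reference the virtqueue.
 */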

/*
 * A new device is added to a data core. First the device is added to the main linked list
 * and then allocated to a specific data core.
 */
static int
new_device(int vid)
{
	int lcore, core_add = 0;
	uint16_t i;
	uint32_t device_num_min = num_devices;
	struct vhost_dev *vdev;
	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
	if (vdev == NULL) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%d) couldn't allocate memory for vhost dev\n",
			vid);
		return -1;
	}
	vdev->vid = vid;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		vhost_txbuff[i * RTE_MAX_VHOST_DEVICE + vid]
			= rte_zmalloc("vhost bufftable",
				sizeof(struct vhost_bufftable),
				RTE_CACHE_LINE_SIZE);

		if (vhost_txbuff[i * RTE_MAX_VHOST_DEVICE + vid] == NULL) {
			RTE_LOG(INFO, VHOST_DATA,
				"(%d) couldn't allocate memory for vhost TX\n", vid);
			return -1;
		}
	}

	if (builtin_net_driver)
		vs_vhost_net_setup(vdev);

	TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
	vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;

	/* reset ready flag */
	vdev->ready = DEVICE_MAC_LEARNING;
	vdev->remove = 0;

	/* Find a suitable lcore to add the device. */
	RTE_LCORE_FOREACH_WORKER(lcore) {
		if (lcore_info[lcore].device_num < device_num_min) {
			device_num_min = lcore_info[lcore].device_num;
			core_add = lcore;
		}
	}
	vdev->coreid = core_add;

	TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
			  lcore_vdev_entry);
	lcore_info[vdev->coreid].device_num++;

	/* Disable notifications. */
	rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
	rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been added to data core %d\n",
		vid, vdev->coreid);

	if (dma_bind[vid].dmas[VIRTIO_RXQ].dev_id != INVALID_DMA_ID) {
		int ret;

		ret = rte_vhost_async_channel_register(vid, VIRTIO_RXQ);
		if (ret == 0)
			dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = true;
		return ret;
	}

	return 0;
}

static int
vring_state_changed(int vid, uint16_t queue_id, int enable)
{
	struct vhost_dev *vdev = NULL;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->vid == vid)
			break;
	}
	if (!vdev)
		return -1;

	if (queue_id != VIRTIO_RXQ)
		return 0;

	if (dma_bind[vid].dmas[queue_id].async_enabled) {
		if (!enable) {
			uint16_t n_pkt = 0;
			int16_t dma_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;
			struct rte_mbuf *m_cpl[vdev->pkts_inflight];

			while (vdev->pkts_inflight) {
				n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
							m_cpl, vdev->pkts_inflight, dma_id, 0);
				free_pkts(m_cpl, n_pkt);
				__atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
			}
		}
	}

	return 0;
}

/*
 * These callbacks allow devices to be added to the data core when configuration
 * has been fully completed.
 */
static const struct rte_vhost_device_ops virtio_net_device_ops =
{
	.new_device = new_device,
	.destroy_device = destroy_device,
	.vring_state_changed = vring_state_changed,
};

/*
 * This is a thread that will wake up after a period to print stats if the
 * user has enabled them.
 */
static void *
print_stats(__rte_unused void *arg)
{
	struct vhost_dev *vdev;
	uint64_t tx_dropped, rx_dropped;
	uint64_t tx, tx_total, rx, rx_total;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	while (1) {
		sleep(enable_stats);

		/* Clear screen and move to top left */
		printf("%s%s\n", clr, top_left);
		printf("Device statistics =================================\n");

		TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
			tx_total = vdev->stats.tx_total;
			tx = vdev->stats.tx;
			tx_dropped = tx_total - tx;

			rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic,
				__ATOMIC_SEQ_CST);
			rx = __atomic_load_n(&vdev->stats.rx_atomic,
				__ATOMIC_SEQ_CST);
			rx_dropped = rx_total - rx;

			printf("Statistics for device %d\n"
				"-----------------------\n"
				"TX total: %" PRIu64 "\n"
				"TX dropped: %" PRIu64 "\n"
				"TX successful: %" PRIu64 "\n"
				"RX total: %" PRIu64 "\n"
				"RX dropped: %" PRIu64 "\n"
				"RX successful: %" PRIu64 "\n",
				vdev->vid,
				tx_total, tx_dropped, tx,
				rx_total, rx_dropped, rx);
		}

		printf("===================================================\n");

		fflush(stdout);
	}

	return NULL;
}

static void
unregister_drivers(int socket_num)
{
	int i, ret;

	for (i = 0; i < socket_num; i++) {
		ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
		if (ret != 0)
			RTE_LOG(ERR, VHOST_CONFIG,
				"Fail to unregister vhost driver for %s.\n",
				socket_files + i * PATH_MAX);
	}
}

/* When we receive an INT signal, unregister the vhost driver */
static void
sigint_handler(__rte_unused int signum)
{
	/* Unregister vhost driver. */
	unregister_drivers(nb_sockets);

	exit(0);
}

static void
reset_dma(void)
{
	int i;

	for (i = 0; i < RTE_MAX_VHOST_DEVICE; i++) {
		int j;

		for (j = 0; j < RTE_MAX_QUEUES_PER_PORT * 2; j++) {
			dma_bind[i].dmas[j].dev_id = INVALID_DMA_ID;
			dma_bind[i].dmas[j].async_enabled = false;
		}
	}

	for (i = 0; i < RTE_DMADEV_DEFAULT_MAX; i++)
		dmas_id[i] = INVALID_DMA_ID;
}

/*
 * Main function, does initialisation and calls the per-lcore functions.
 */
int
main(int argc, char *argv[])
{
	unsigned lcore_id, core_id = 0;
	unsigned nb_ports, valid_num_ports;
	int ret, i;
	uint16_t portid;
	static pthread_t tid;
	uint64_t flags = RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS;

	signal(SIGINT, sigint_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* initialize dma structures */
	reset_dma();

	/* parse app arguments */
	ret = us_vhost_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid argument\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		TAILQ_INIT(&lcore_info[lcore_id].vdev_list);

		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;
	}

	if (rte_lcore_count() > RTE_MAX_LCORE)
		rte_exit(EXIT_FAILURE, "Not enough cores\n");

	/* Get the number of physical ports. */
	nb_ports = rte_eth_dev_count_avail();

	/*
	 * Update the global var NUM_PORTS and global array PORTS
	 * and get value of var VALID_NUM_PORTS according to system ports number
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	/*
	 * FIXME: here we are trying to allocate mbufs big enough for
	 * @MAX_QUEUES, but the truth is we're never going to use that
	 * many queues here. We probably should only do allocation for
	 * those queues we are going to use.
	 */
	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", total_num_mbufs,
					    MBUF_CACHE_SIZE, 0, MBUF_DATA_SIZE,
					    rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	if (vm2vm_mode == VM2VM_HARDWARE) {
		/* Enable VT loop back to let L2 switch to do it. */
		vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
		RTE_LOG(DEBUG, VHOST_CONFIG,
			"Enable loop back for L2 switch in vmdq.\n");
	}

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			RTE_LOG(INFO, VHOST_PORT,
				"Skipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid) != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot initialize network ports\n");
	}

	/* Enable stats if the user option is set. */
	if (enable_stats) {
		ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
					print_stats, NULL);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot create print-stats thread\n");
	}

	/* Launch all data cores. */
	RTE_LCORE_FOREACH_WORKER(lcore_id)
		rte_eal_remote_launch(switch_worker, NULL, lcore_id);

	if (client_mode)
		flags |= RTE_VHOST_USER_CLIENT;

	for (i = 0; i < dma_count; i++) {
		if (rte_vhost_async_dma_configure(dmas_id[i], 0) < 0) {
			RTE_LOG(ERR, VHOST_PORT, "Failed to configure DMA in vhost.\n");
			rte_exit(EXIT_FAILURE, "Cannot use given DMA device\n");
		}
	}

	/* Register vhost user driver to handle vhost messages. */
	for (i = 0; i < nb_sockets; i++) {
		char *file = socket_files + i * PATH_MAX;

		if (dma_count)
			flags = flags | RTE_VHOST_USER_ASYNC_COPY;

		ret = rte_vhost_driver_register(file, flags);
		if (ret != 0) {
			unregister_drivers(i);
			rte_exit(EXIT_FAILURE,
				"vhost driver register failure.\n");
		}

		if (builtin_net_driver)
			rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);

		if (mergeable == 0) {
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_MRG_RXBUF);
		}

		if (enable_tx_csum == 0) {
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_CSUM);
		}

		if (enable_tso == 0) {
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_HOST_TSO4);
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_HOST_TSO6);
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_GUEST_TSO4);
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_GUEST_TSO6);
		}

		if (promiscuous) {
			rte_vhost_driver_enable_features(file,
				1ULL << VIRTIO_NET_F_CTRL_RX);
		}

		ret = rte_vhost_driver_callback_register(file,
			&virtio_net_device_ops);
		if (ret != 0) {
			rte_exit(EXIT_FAILURE,
				"failed to register vhost driver callbacks.\n");
		}

		if (rte_vhost_driver_start(file) < 0) {
			rte_exit(EXIT_FAILURE,
				"failed to start vhost driver.\n");
		}
	}

	RTE_LCORE_FOREACH_WORKER(lcore_id)
		rte_eal_wait_lcore(lcore_id);

	/* clean up the EAL */
	rte_eal_cleanup();

	return 0;
}